[{"data":1,"prerenderedAt":9592},["ShallowReactive",2],{"/en-us/blog/categories/engineering/":3,"navigation-en-us":21,"banner-en-us":439,"footer-en-us":451,"engineering-category-page-en-us":662},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"seo":8,"content":11,"config":12,"_id":15,"_type":16,"title":9,"_source":17,"_file":18,"_stem":19,"_extension":20},"/en-us/blog/categories/engineering","categories",false,"",{"title":9,"description":10},"Engineering","Browse articles related to Engineering on the GitLab Blog",{"name":9},{"template":13,"slug":14,"hide":6},"BlogCategory","engineering","content:en-us:blog:categories:engineering.yml","yaml","content","en-us/blog/categories/engineering.yml","en-us/blog/categories/engineering","yml",{"_path":22,"_dir":23,"_draft":6,"_partial":6,"_locale":7,"data":24,"_id":435,"_type":16,"title":436,"_source":17,"_file":437,"_stem":438,"_extension":20},"/shared/en-us/main-navigation","en-us",{"logo":25,"freeTrial":30,"sales":35,"login":40,"items":45,"search":376,"minimal":407,"duo":426},{"config":26},{"href":27,"dataGaName":28,"dataGaLocation":29},"/","gitlab logo","header",{"text":31,"config":32},"Get free trial",{"href":33,"dataGaName":34,"dataGaLocation":29},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":36,"config":37},"Talk to sales",{"href":38,"dataGaName":39,"dataGaLocation":29},"/sales/","sales",{"text":41,"config":42},"Sign in",{"href":43,"dataGaName":44,"dataGaLocation":29},"https://gitlab.com/users/sign_in/","sign in",[46,90,186,191,297,357],{"text":47,"config":48,"cards":50,"footer":73},"Platform",{"dataNavLevelOne":49},"platform",[51,57,65],{"title":47,"description":52,"link":53},"The most comprehensive AI-powered DevSecOps Platform",{"text":54,"config":55},"Explore our Platform",{"href":56,"dataGaName":49,"dataGaLocation":29},"/platform/",{"title":58,"description":59,"link":60},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":61,"config":62},"Meet GitLab Duo",{"href":63,"dataGaName":64,"dataGaLocation":29},"/gitlab-duo/","gitlab duo ai",{"title":66,"description":67,"link":68},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":69,"config":70},"Learn more",{"href":71,"dataGaName":72,"dataGaLocation":29},"/why-gitlab/","why gitlab",{"title":74,"items":75},"Get started with",[76,81,86],{"text":77,"config":78},"Platform Engineering",{"href":79,"dataGaName":80,"dataGaLocation":29},"/solutions/platform-engineering/","platform engineering",{"text":82,"config":83},"Developer Experience",{"href":84,"dataGaName":85,"dataGaLocation":29},"/developer-experience/","Developer experience",{"text":87,"config":88},"MLOps",{"href":89,"dataGaName":87,"dataGaLocation":29},"/topics/devops/the-role-of-ai-in-devops/",{"text":91,"left":92,"config":93,"link":95,"lists":99,"footer":168},"Product",true,{"dataNavLevelOne":94},"solutions",{"text":96,"config":97},"View all Solutions",{"href":98,"dataGaName":94,"dataGaLocation":29},"/solutions/",[100,125,147],{"title":101,"description":102,"link":103,"items":108},"Automation","CI/CD and automation to accelerate deployment",{"config":104},{"icon":105,"href":106,"dataGaName":107,"dataGaLocation":29},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[109,113,117,121],{"text":110,"config":111},"CI/CD",{"href":112,"dataGaLocation":29,"dataGaName":110},"/solutions/continuous-integration/",{"text":114,"config":115},"AI-Assisted 
Development",{"href":63,"dataGaLocation":29,"dataGaName":116},"AI assisted development",{"text":118,"config":119},"Source Code Management",{"href":120,"dataGaLocation":29,"dataGaName":118},"/solutions/source-code-management/",{"text":122,"config":123},"Automated Software Delivery",{"href":106,"dataGaLocation":29,"dataGaName":124},"Automated software delivery",{"title":126,"description":127,"link":128,"items":133},"Security","Deliver code faster without compromising security",{"config":129},{"href":130,"dataGaName":131,"dataGaLocation":29,"icon":132},"/solutions/security-compliance/","security and compliance","ShieldCheckLight",[134,137,142],{"text":135,"config":136},"Security & Compliance",{"href":130,"dataGaLocation":29,"dataGaName":135},{"text":138,"config":139},"Software Supply Chain Security",{"href":140,"dataGaLocation":29,"dataGaName":141},"/solutions/supply-chain/","Software supply chain security",{"text":143,"config":144},"Compliance & Governance",{"href":145,"dataGaLocation":29,"dataGaName":146},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":148,"link":149,"items":154},"Measurement",{"config":150},{"icon":151,"href":152,"dataGaName":153,"dataGaLocation":29},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[155,159,163],{"text":156,"config":157},"Visibility & Measurement",{"href":152,"dataGaLocation":29,"dataGaName":158},"Visibility and Measurement",{"text":160,"config":161},"Value Stream Management",{"href":162,"dataGaLocation":29,"dataGaName":160},"/solutions/value-stream-management/",{"text":164,"config":165},"Analytics & Insights",{"href":166,"dataGaLocation":29,"dataGaName":167},"/solutions/analytics-and-insights/","Analytics and insights",{"title":169,"items":170},"GitLab for",[171,176,181],{"text":172,"config":173},"Enterprise",{"href":174,"dataGaLocation":29,"dataGaName":175},"/enterprise/","enterprise",{"text":177,"config":178},"Small Business",{"href":179,"dataGaLocation":29,"dataGaName":180},"/small-business/","small business",{"text":182,"config":183},"Public Sector",{"href":184,"dataGaLocation":29,"dataGaName":185},"/solutions/public-sector/","public sector",{"text":187,"config":188},"Pricing",{"href":189,"dataGaName":190,"dataGaLocation":29,"dataNavLevelOne":190},"/pricing/","pricing",{"text":192,"config":193,"link":195,"lists":199,"feature":284},"Resources",{"dataNavLevelOne":194},"resources",{"text":196,"config":197},"View all resources",{"href":198,"dataGaName":194,"dataGaLocation":29},"/resources/",[200,233,256],{"title":201,"items":202},"Getting started",[203,208,213,218,223,228],{"text":204,"config":205},"Install",{"href":206,"dataGaName":207,"dataGaLocation":29},"/install/","install",{"text":209,"config":210},"Quick start guides",{"href":211,"dataGaName":212,"dataGaLocation":29},"/get-started/","quick setup checklists",{"text":214,"config":215},"Learn",{"href":216,"dataGaLocation":29,"dataGaName":217},"https://university.gitlab.com/","learn",{"text":219,"config":220},"Product documentation",{"href":221,"dataGaName":222,"dataGaLocation":29},"https://docs.gitlab.com/","product documentation",{"text":224,"config":225},"Best practice videos",{"href":226,"dataGaName":227,"dataGaLocation":29},"/getting-started-videos/","best practice videos",{"text":229,"config":230},"Integrations",{"href":231,"dataGaName":232,"dataGaLocation":29},"/integrations/","integrations",{"title":234,"items":235},"Discover",[236,241,246,251],{"text":237,"config":238},"Customer success 
stories",{"href":239,"dataGaName":240,"dataGaLocation":29},"/customers/","customer success stories",{"text":242,"config":243},"Blog",{"href":244,"dataGaName":245,"dataGaLocation":29},"/blog/","blog",{"text":247,"config":248},"Remote",{"href":249,"dataGaName":250,"dataGaLocation":29},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":252,"config":253},"TeamOps",{"href":254,"dataGaName":255,"dataGaLocation":29},"/teamops/","teamops",{"title":257,"items":258},"Connect",[259,264,269,274,279],{"text":260,"config":261},"GitLab Services",{"href":262,"dataGaName":263,"dataGaLocation":29},"/services/","services",{"text":265,"config":266},"Community",{"href":267,"dataGaName":268,"dataGaLocation":29},"/community/","community",{"text":270,"config":271},"Forum",{"href":272,"dataGaName":273,"dataGaLocation":29},"https://forum.gitlab.com/","forum",{"text":275,"config":276},"Events",{"href":277,"dataGaName":278,"dataGaLocation":29},"/events/","events",{"text":280,"config":281},"Partners",{"href":282,"dataGaName":283,"dataGaLocation":29},"/partners/","partners",{"backgroundColor":285,"textColor":286,"text":287,"image":288,"link":292},"#2f2a6b","#fff","Insights for the future of software development",{"altText":289,"config":290},"the source promo card",{"src":291},"/images/navigation/the-source-promo-card.svg",{"text":293,"config":294},"Read the latest",{"href":295,"dataGaName":296,"dataGaLocation":29},"/the-source/","the source",{"text":298,"config":299,"lists":301},"Company",{"dataNavLevelOne":300},"company",[302],{"items":303},[304,309,315,317,322,327,332,337,342,347,352],{"text":305,"config":306},"About",{"href":307,"dataGaName":308,"dataGaLocation":29},"/company/","about",{"text":310,"config":311,"footerGa":314},"Jobs",{"href":312,"dataGaName":313,"dataGaLocation":29},"/jobs/","jobs",{"dataGaName":313},{"text":275,"config":316},{"href":277,"dataGaName":278,"dataGaLocation":29},{"text":318,"config":319},"Leadership",{"href":320,"dataGaName":321,"dataGaLocation":29},"/company/team/e-group/","leadership",{"text":323,"config":324},"Team",{"href":325,"dataGaName":326,"dataGaLocation":29},"/company/team/","team",{"text":328,"config":329},"Handbook",{"href":330,"dataGaName":331,"dataGaLocation":29},"https://handbook.gitlab.com/","handbook",{"text":333,"config":334},"Investor relations",{"href":335,"dataGaName":336,"dataGaLocation":29},"https://ir.gitlab.com/","investor relations",{"text":338,"config":339},"Trust Center",{"href":340,"dataGaName":341,"dataGaLocation":29},"/security/","trust center",{"text":343,"config":344},"AI Transparency Center",{"href":345,"dataGaName":346,"dataGaLocation":29},"/ai-transparency-center/","ai transparency center",{"text":348,"config":349},"Newsletter",{"href":350,"dataGaName":351,"dataGaLocation":29},"/company/contact/","newsletter",{"text":353,"config":354},"Press",{"href":355,"dataGaName":356,"dataGaLocation":29},"/press/","press",{"text":358,"config":359,"lists":360},"Contact us",{"dataNavLevelOne":300},[361],{"items":362},[363,366,371],{"text":36,"config":364},{"href":38,"dataGaName":365,"dataGaLocation":29},"talk to sales",{"text":367,"config":368},"Get help",{"href":369,"dataGaName":370,"dataGaLocation":29},"/support/","get help",{"text":372,"config":373},"Customer portal",{"href":374,"dataGaName":375,"dataGaLocation":29},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":377,"login":378,"suggestions":385},"Close",{"text":379,"link":380},"To search repositories and projects, login 
to",{"text":381,"config":382},"gitlab.com",{"href":43,"dataGaName":383,"dataGaLocation":384},"search login","search",{"text":386,"default":387},"Suggestions",[388,390,394,396,400,404],{"text":58,"config":389},{"href":63,"dataGaName":58,"dataGaLocation":384},{"text":391,"config":392},"Code Suggestions (AI)",{"href":393,"dataGaName":391,"dataGaLocation":384},"/solutions/code-suggestions/",{"text":110,"config":395},{"href":112,"dataGaName":110,"dataGaLocation":384},{"text":397,"config":398},"GitLab on AWS",{"href":399,"dataGaName":397,"dataGaLocation":384},"/partners/technology-partners/aws/",{"text":401,"config":402},"GitLab on Google Cloud",{"href":403,"dataGaName":401,"dataGaLocation":384},"/partners/technology-partners/google-cloud-platform/",{"text":405,"config":406},"Why GitLab?",{"href":71,"dataGaName":405,"dataGaLocation":384},{"freeTrial":408,"mobileIcon":413,"desktopIcon":418,"secondaryButton":421},{"text":409,"config":410},"Start free trial",{"href":411,"dataGaName":34,"dataGaLocation":412},"https://gitlab.com/-/trials/new/","nav",{"altText":414,"config":415},"Gitlab Icon",{"src":416,"dataGaName":417,"dataGaLocation":412},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":414,"config":419},{"src":420,"dataGaName":417,"dataGaLocation":412},"/images/brand/gitlab-logo-type.svg",{"text":422,"config":423},"Get Started",{"href":424,"dataGaName":425,"dataGaLocation":412},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":427,"mobileIcon":431,"desktopIcon":433},{"text":428,"config":429},"Learn more about GitLab Duo",{"href":63,"dataGaName":430,"dataGaLocation":412},"gitlab duo",{"altText":414,"config":432},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":434},{"src":420,"dataGaName":417,"dataGaLocation":412},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":440,"_dir":23,"_draft":6,"_partial":6,"_locale":7,"title":441,"button":442,"config":446,"_id":448,"_type":16,"_source":17,"_file":449,"_stem":450,"_extension":20},"/shared/en-us/banner","GitLab Duo Agent Platform is now in public beta!",{"text":69,"config":443},{"href":444,"dataGaName":445,"dataGaLocation":29},"/gitlab-duo/agent-platform/","duo banner",{"layout":447},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":452,"_dir":23,"_draft":6,"_partial":6,"_locale":7,"data":453,"_id":658,"_type":16,"title":659,"_source":17,"_file":660,"_stem":661,"_extension":20},"/shared/en-us/main-footer",{"text":454,"source":455,"edit":461,"contribute":466,"config":471,"items":476,"minimal":650},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":456,"config":457},"View page source",{"href":458,"dataGaName":459,"dataGaLocation":460},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":462,"config":463},"Edit this page",{"href":464,"dataGaName":465,"dataGaLocation":460},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":467,"config":468},"Please contribute",{"href":469,"dataGaName":470,"dataGaLocation":460},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please 
contribute",{"twitter":472,"facebook":473,"youtube":474,"linkedin":475},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[477,500,557,586,620],{"title":47,"links":478,"subMenu":483},[479],{"text":480,"config":481},"DevSecOps platform",{"href":56,"dataGaName":482,"dataGaLocation":460},"devsecops platform",[484],{"title":187,"links":485},[486,490,495],{"text":487,"config":488},"View plans",{"href":189,"dataGaName":489,"dataGaLocation":460},"view plans",{"text":491,"config":492},"Why Premium?",{"href":493,"dataGaName":494,"dataGaLocation":460},"/pricing/premium/","why premium",{"text":496,"config":497},"Why Ultimate?",{"href":498,"dataGaName":499,"dataGaLocation":460},"/pricing/ultimate/","why ultimate",{"title":501,"links":502},"Solutions",[503,508,511,513,518,523,527,530,534,539,541,544,547,552],{"text":504,"config":505},"Digital transformation",{"href":506,"dataGaName":507,"dataGaLocation":460},"/topics/digital-transformation/","digital transformation",{"text":135,"config":509},{"href":130,"dataGaName":510,"dataGaLocation":460},"security & compliance",{"text":124,"config":512},{"href":106,"dataGaName":107,"dataGaLocation":460},{"text":514,"config":515},"Agile development",{"href":516,"dataGaName":517,"dataGaLocation":460},"/solutions/agile-delivery/","agile delivery",{"text":519,"config":520},"Cloud transformation",{"href":521,"dataGaName":522,"dataGaLocation":460},"/topics/cloud-native/","cloud transformation",{"text":524,"config":525},"SCM",{"href":120,"dataGaName":526,"dataGaLocation":460},"source code management",{"text":110,"config":528},{"href":112,"dataGaName":529,"dataGaLocation":460},"continuous integration & delivery",{"text":531,"config":532},"Value stream management",{"href":162,"dataGaName":533,"dataGaLocation":460},"value stream management",{"text":535,"config":536},"GitOps",{"href":537,"dataGaName":538,"dataGaLocation":460},"/solutions/gitops/","gitops",{"text":172,"config":540},{"href":174,"dataGaName":175,"dataGaLocation":460},{"text":542,"config":543},"Small business",{"href":179,"dataGaName":180,"dataGaLocation":460},{"text":545,"config":546},"Public sector",{"href":184,"dataGaName":185,"dataGaLocation":460},{"text":548,"config":549},"Education",{"href":550,"dataGaName":551,"dataGaLocation":460},"/solutions/education/","education",{"text":553,"config":554},"Financial services",{"href":555,"dataGaName":556,"dataGaLocation":460},"/solutions/finance/","financial 
services",{"title":192,"links":558},[559,561,563,565,568,570,572,574,576,578,580,582,584],{"text":204,"config":560},{"href":206,"dataGaName":207,"dataGaLocation":460},{"text":209,"config":562},{"href":211,"dataGaName":212,"dataGaLocation":460},{"text":214,"config":564},{"href":216,"dataGaName":217,"dataGaLocation":460},{"text":219,"config":566},{"href":221,"dataGaName":567,"dataGaLocation":460},"docs",{"text":242,"config":569},{"href":244,"dataGaName":245,"dataGaLocation":460},{"text":237,"config":571},{"href":239,"dataGaName":240,"dataGaLocation":460},{"text":247,"config":573},{"href":249,"dataGaName":250,"dataGaLocation":460},{"text":260,"config":575},{"href":262,"dataGaName":263,"dataGaLocation":460},{"text":252,"config":577},{"href":254,"dataGaName":255,"dataGaLocation":460},{"text":265,"config":579},{"href":267,"dataGaName":268,"dataGaLocation":460},{"text":270,"config":581},{"href":272,"dataGaName":273,"dataGaLocation":460},{"text":275,"config":583},{"href":277,"dataGaName":278,"dataGaLocation":460},{"text":280,"config":585},{"href":282,"dataGaName":283,"dataGaLocation":460},{"title":298,"links":587},[588,590,592,594,596,598,600,604,609,611,613,615],{"text":305,"config":589},{"href":307,"dataGaName":300,"dataGaLocation":460},{"text":310,"config":591},{"href":312,"dataGaName":313,"dataGaLocation":460},{"text":318,"config":593},{"href":320,"dataGaName":321,"dataGaLocation":460},{"text":323,"config":595},{"href":325,"dataGaName":326,"dataGaLocation":460},{"text":328,"config":597},{"href":330,"dataGaName":331,"dataGaLocation":460},{"text":333,"config":599},{"href":335,"dataGaName":336,"dataGaLocation":460},{"text":601,"config":602},"Sustainability",{"href":603,"dataGaName":601,"dataGaLocation":460},"/sustainability/",{"text":605,"config":606},"Diversity, inclusion and belonging (DIB)",{"href":607,"dataGaName":608,"dataGaLocation":460},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":338,"config":610},{"href":340,"dataGaName":341,"dataGaLocation":460},{"text":348,"config":612},{"href":350,"dataGaName":351,"dataGaLocation":460},{"text":353,"config":614},{"href":355,"dataGaName":356,"dataGaLocation":460},{"text":616,"config":617},"Modern Slavery Transparency Statement",{"href":618,"dataGaName":619,"dataGaLocation":460},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":621,"links":622},"Contact Us",[623,626,628,630,635,640,645],{"text":624,"config":625},"Contact an expert",{"href":38,"dataGaName":39,"dataGaLocation":460},{"text":367,"config":627},{"href":369,"dataGaName":370,"dataGaLocation":460},{"text":372,"config":629},{"href":374,"dataGaName":375,"dataGaLocation":460},{"text":631,"config":632},"Status",{"href":633,"dataGaName":634,"dataGaLocation":460},"https://status.gitlab.com/","status",{"text":636,"config":637},"Terms of use",{"href":638,"dataGaName":639,"dataGaLocation":460},"/terms/","terms of use",{"text":641,"config":642},"Privacy statement",{"href":643,"dataGaName":644,"dataGaLocation":460},"/privacy/","privacy statement",{"text":646,"config":647},"Cookie preferences",{"dataGaName":648,"dataGaLocation":460,"id":649,"isOneTrustButton":92},"cookie 
preferences","ot-sdk-btn",{"items":651},[652,654,656],{"text":636,"config":653},{"href":638,"dataGaName":639,"dataGaLocation":460},{"text":641,"config":655},{"href":643,"dataGaName":644,"dataGaLocation":460},{"text":646,"config":657},{"dataGaName":648,"dataGaLocation":460,"id":649,"isOneTrustButton":92},"content:shared:en-us:main-footer.yml","Main Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"featuredPost":663,"allPosts":684,"totalPages":9590,"initialPosts":9591},{"_path":664,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":665,"content":668,"config":677,"_id":680,"_type":16,"title":681,"_source":17,"_file":682,"_stem":683,"_extension":20},"/en-us/blog/inside-gitlabs-healthy-backlog-initiative",{"noIndex":6,"title":666,"description":667},"Inside GitLab's Healthy Backlog Initiative","Learn how we are refining issue management to prioritize strategic work, improve delivery, and create stronger feedback loops with users.",{"title":666,"description":667,"authors":669,"heroImage":671,"date":672,"body":673,"category":14,"tags":674},[670],"Stan Hu","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664458/Blog/Hero%20Images/Gartner_AI_Code_Assistants_Blog_Post_Cover_Image_1800x945.png","2025-07-23","At GitLab, we are proud of the strong, collaborative relationship with our community. We encourage everyone to contribute to GitLab. Over the years, those community contributions have helped strengthen the GitLab platform. But as we've grown, community participation via GitLab issues has grown, resulting in an unwieldy issue backlog. \n\nGitLab's Product and Engineering teams recently launched the [Healthy Backlog Initiative](https://gitlab.com/groups/gitlab-org/-/epics/18639) to address this backlog and refine our approach to managing contributed issues going forward.\n\nIssues with ongoing community engagement, recent activity, or a clear strategic alignment will remain open. We'll be closing issues that are no longer relevant, lack community interest, or no longer fit our current product direction.\n\nThis focus will lead to increased innovation, better expectation setting, and faster development and delivery cycles of community-contributed capabilities.\n\n## What is the Healthy Backlog Initiative?  \n\nOver time, the GitLab community has submitted tens of thousands of issues, including bugs, feature requests, and feedback items. Currently, the [main GitLab issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues) contains over 65,000 issues, some are no longer applicable to the platform and others remain relevant today.\n\nOur Healthy Backlog Initiative will cull the backlog and establish a workstream for our Product and Engineering teams to implement a more focused approach to backlog management. They will conduct weekly assessments of the backlog to ensure that we prioritize issues that align with our product strategy and roadmap.\n\n**Note:** If you believe a closed issue does align with GitLab’s product strategy and roadmap, or if you're actively contributing to the request, we strongly encourage you to comment on the issue with updated context and current details. We are committed to reviewing these updated issues as part of our regular assessment efforts. \n\n## How does this change benefit you?\n\nThis streamlined approach means direct, tangible improvements for every GitLab user:\n\n* **Sharper focus and faster delivery:** By narrowing our backlog to strategically aligned features, we can dedicate development resources more effectively. 
This means you can expect shorter development cycles and more meaningful improvements to your GitLab experience.

* **Clearer expectations:** We are committed to transparent communication about what's on our roadmap and what isn't, empowering you to make informed decisions about your workflows and contributions.

* **Accelerated feedback loops:** With a clean backlog, new feedback and feature requests will be reviewed and prioritized more efficiently, reducing overall triage time and ensuring that issues receive timely attention. This creates a more responsive feedback loop for everyone.

This initiative does not diminish the significance of community feedback and contributions. We are taking this action to create clarity around what GitLab Team Members can realistically commit to delivering, and to ensure that all feedback receives proper consideration.

## Looking forward

The GitLab Healthy Backlog Initiative reflects our commitment to being transparent and effective stewards of the GitLab platform. By clearly communicating our priorities and focusing our efforts on what we can realistically deliver over the next year, we're better positioned to meet and exceed your expectations.

Your continued participation and feedback help make GitLab stronger. Every comment, merge request, bug report, and feature suggestion contributes to our shared vision. And we're still rewarding you for that as well, with initiatives like our monthly Notable Contributor program, swag rewards for leveling up, Hackathon prizes, and more, all available through our [Contributor Portal](https://contributors.gitlab.com).

> To learn more about how to contribute to GitLab, [visit our community site](https://about.gitlab.com/community/). To share feedback on this project, please add your comments on [the feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/556865) in this [epic](https://gitlab.com/groups/gitlab-org/-/epics/18639).
Initiative","en-us/blog/inside-gitlabs-healthy-backlog-initiative.yml","en-us/blog/inside-gitlabs-healthy-backlog-initiative",[685,711,735,757,778,799,819,839,859,880,901,923,944,966,988,1009,1030,1050,1070,1091,1111,1130,1151,1171,1191,1211,1231,1250,1272,1293,1314,1335,1354,1373,1392,1412,1431,1452,1474,1493,1515,1534,1554,1574,1594,1613,1633,1653,1671,1690,1711,1731,1751,1771,1790,1809,1829,1848,1867,1886,1907,1926,1946,1965,1986,2004,2024,2042,2062,2082,2102,2120,2140,2160,2180,2200,2220,2239,2259,2279,2298,2317,2338,2357,2377,2396,2416,2436,2454,2473,2493,2511,2528,2548,2568,2586,2606,2626,2647,2666,2685,2705,2724,2743,2764,2783,2802,2821,2841,2862,2881,2900,2919,2939,2958,2978,2997,3017,3035,3056,3074,3093,3113,3133,3151,3170,3189,3209,3227,3246,3264,3282,3301,3320,3339,3357,3376,3396,3416,3434,3454,3472,3492,3512,3529,3548,3567,3586,3607,3627,3646,3666,3686,3706,3726,3746,3766,3785,3804,3823,3842,3861,3881,3902,3921,3941,3961,3981,4000,4017,4037,4057,4076,4095,4115,4135,4154,4173,4192,4212,4230,4249,4268,4287,4307,4327,4346,4366,4385,4406,4426,4448,4466,4486,4505,4526,4545,4564,4584,4603,4621,4641,4660,4681,4701,4719,4739,4758,4776,4796,4816,4836,4856,4875,4895,4915,4935,4955,4975,4996,5015,5035,5055,5073,5092,5111,5130,5150,5170,5189,5208,5227,5247,5265,5284,5303,5322,5342,5362,5382,5401,5422,5440,5459,5479,5497,5515,5534,5554,5574,5593,5612,5631,5650,5669,5688,5707,5725,5744,5763,5783,5803,5821,5841,5861,5880,5899,5918,5937,5956,5977,5996,6014,6035,6054,6073,6092,6112,6130,6149,6168,6187,6207,6227,6246,6268,6287,6307,6326,6346,6365,6384,6404,6423,6444,6464,6482,6500,6519,6538,6557,6577,6597,6616,6636,6657,6677,6697,6717,6738,6758,6778,6795,6814,6835,6855,6874,6893,6912,6930,6949,6967,6985,7005,7025,7045,7065,7085,7105,7124,7142,7160,7179,7199,7217,7237,7256,7275,7293,7312,7331,7351,7369,7389,7409,7429,7448,7466,7485,7503,7523,7544,7562,7582,7601,7620,7640,7660,7680,7698,7718,7737,7755,7775,7794,7813,7832,7851,7871,7891,7911,7930,7949,7969,7987,8008,8028,8047,8067,8086,8106,8127,8146,8166,8186,8205,8225,8244,8263,8282,8300,8318,8336,8353,8371,8389,8408,8426,8443,8461,8479,8497,8515,8534,8552,8572,8589,8606,8624,8642,8660,8679,8696,8713,8732,8750,8768,8787,8806,8822,8841,8859,8878,8895,8913,8932,8950,8968,8987,9005,9022,9040,9058,9077,9095,9113,9132,9150,9169,9187,9205,9222,9239,9258,9276,9293,9310,9327,9345,9363,9381,9398,9415,9432,9449,9466,9484,9502,9520,9539,9556,9573],{"_path":686,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":687,"content":695,"config":705,"_id":707,"_type":16,"title":708,"_source":17,"_file":709,"_stem":710,"_extension":20},"/en-us/blog/how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes",{"title":688,"description":689,"ogTitle":688,"ogDescription":689,"noIndex":6,"ogImage":690,"ogUrl":691,"ogSiteName":692,"ogType":693,"canonicalUrls":691,"schema":694},"How we decreased GitLab repo backup times from 48 hours to 41 minutes","Learn how we tracked a performance bottleneck to a 15-year-old Git function and fixed it, leading to enhanced efficiency that supports more robust backup strategies and can reduce risk.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097166/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20display%20preview%20for%20blog%20images%20%282%29_2pKf8RsKzAaThmQfqHIaa7_1750097166565.png","https://about.gitlab.com/blog/how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n     
   \"@type\": \"Article\",\n        \"headline\": \"How we decreased GitLab repo backup times from 48 hours to 41 minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Karthik Nayak\"},{\"@type\":\"Person\",\"name\":\"Manuel Kraft\"}],\n        \"datePublished\": \"2025-06-05\",\n      }",{"title":688,"description":689,"authors":696,"heroImage":690,"date":699,"body":700,"category":14,"tags":701},[697,698],"Karthik Nayak","Manuel Kraft","2025-06-05","Repository backups are a critical component of any robust disaster recovery strategy. However, as repositories grow in size, the process of creating reliable backups becomes increasingly challenging.  Our own [Rails repository](https://gitlab.com/gitlab-org/gitlab) was taking 48 hours to back up — forcing impossible choices between backup frequency and system performance. We wanted to tackle this issue for our customers and for our own users internally. \n\nUltimately, we traced the issue to a 15-year-old Git function with O(N²) complexity and fixed it with an algorithmic change, __reducing backup times exponentially__. The result: lower costs, reduced risk, and backup strategies that actually scale with your codebase.\n\nThis turned out to be a Git scalability issue that affects anyone with large repositories. Here's how we tracked it down and fixed it. \n\n## Backup at scale\n\nFirst, let's look at the problem. As organizations scale their repositories and backups grow more complex, here are some of the challenges they can face:\n\n* **Time-prohibitive backups:** For very large repositories, creating a repository backup could take several hours, which can hinder the ability to schedule regular backups. \n* **Resource intensity:** Extended backup processes can consume substantial server resources, potentially impacting other operations.\n* **Backup windows:** Finding adequate maintenance windows for such lengthy processes can be difficult for teams running 24/7 operations.\n* **Increased failure risk:** Long-running processes are more susceptible to interruptions from network issues, server restarts, and system errors, which can force teams to restart the entire very long backup process from scratch.\n* **Race conditions:** Because it takes a long time to create a backup, the repository might have changed a lot during the process, potentially creating an invalid backup or interrupting the backup because objects are no longer available.\n\nThese challenges can lead to compromising on backup frequency or completeness – an unacceptable trade-off when it comes to data protection. Extended backup windows can force customers into workarounds. Some might adopt external tooling, while others might reduce backup frequency, resulting in potential inconsistent data protection strategies across organizations.\n\nNow, let's dig into how we identified a performance bottleneck, found a resolution, and deployed it to help cut backup times.\n\n## The technical challenge\n\nGitLab's repository backup functionality relies on the [`git bundle create`](https://git-scm.com/docs/git-bundle) command, which captures a complete snapshot of a repository, including all objects and references like branches and tags. This bundle serves as a restoration point for recreating the repository in its exact state.\n\nHowever, the implementation of the command suffered from poor scalability related to reference count, creating a performance bottleneck. As repositories accumulated more references, processing time increased exponentially. 
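
To gauge whether a repository of your own sits in this regime, the reference count and the bundle timing can each be checked with a single command. A minimal sketch using stock Git; the bundle path is arbitrary:

```shell
# Count the repository's references (branches, tags, etc.).
git for-each-ref | wc -l

# Time a full-repository bundle, the same operation GitLab backups rely on.
time git bundle create /tmp/all-refs.bundle --all
```
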
### Root cause analysis

To identify the root cause of this performance bottleneck, we analyzed a flame graph of the command during execution.

![Flame graph showing command during execution](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097176/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097176388.jpg)

A flame graph displays the execution path of a command through its stack trace. Each bar corresponds to a function in the code, with the bar's width indicating how much time the command spent executing within that particular function.

When examining the flame graph of `git bundle create` running on a repository with 10,000 references, approximately 80% of the execution time is consumed by the `object_array_remove_duplicates()` function. This function was introduced to Git in commit [b2a6d1c686](https://gitlab.com/gitlab-org/git/-/commit/b2a6d1c686) (bundle: allow the same ref to be given more than once, 2009-01-17).

To understand this change, it's important to know that `git bundle create` allows users to specify which references to include in the bundle. For complete repository bundles, the `--all` flag packages all references.

The commit addressed a problem where users providing duplicate references on the command line – such as `git bundle create main.bundle main main` – would create a bundle without properly handling the duplicated `main` reference. Unbundling this bundle in a Git repository would break, because it tries to write the same ref twice. The code to avoid duplication uses nested `for` loops that iterate through all references to identify duplicates. This O(N²) algorithm becomes a significant performance bottleneck in repositories with large reference counts, consuming substantial processing time.

### The fix: From O(N²) to efficient mapping

To resolve this performance issue, we contributed an upstream fix to Git that replaces the nested loops with a map data structure. Each reference is added to the map, which automatically ensures only a single copy of each reference is retained for processing.

This change dramatically enhances the performance of `git bundle create` and enables much better scalability in repositories with large reference counts. Benchmark testing on a repository with 100,000 references demonstrates a 6x performance improvement.

```shell
Benchmark 1: bundle (refcount = 100000, revision = master)
  Time (mean ± σ):     14.653 s ±  0.203 s    [User: 13.940 s, System: 0.762 s]
  Range (min … max):   14.237 s … 14.920 s    10 runs

Benchmark 2: bundle (refcount = 100000, revision = HEAD)
  Time (mean ± σ):      2.394 s ±  0.023 s    [User: 1.684 s, System: 0.798 s]
  Range (min … max):    2.364 s …  2.425 s    10 runs

Summary
  bundle (refcount = 100000, revision = HEAD) ran
    6.12 ± 0.10 times faster than bundle (refcount = 100000, revision = master)
```
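
The output above matches the format of the hyperfine benchmarking tool, so a comparable before/after run can be reproduced along these lines. This is a hedged sketch: the two Git build paths and the repository path are hypothetical, not taken from the article:

```shell
# Compare an unpatched git build against a patched one on the same repository.
hyperfine --warmup 1 \
  '/opt/git-master/bin/git -C /repos/big.git bundle create /tmp/a.bundle --all' \
  '/opt/git-fixed/bin/git -C /repos/big.git bundle create /tmp/b.bundle --all'
```
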
The patch was accepted and [merged](https://gitlab.com/gitlab-org/git/-/commit/bb74c0abbc31da35be52999569ea481ebd149d1d) into upstream Git. At GitLab, we backported this fix to ensure our customers could benefit immediately, without waiting for the next Git release.

## The result: Dramatically decreased backup times

The performance gains from this improvement have been nothing short of transformative:

* **From 48 hours to 41 minutes:** Creating a backup of our largest repository (`gitlab-org/gitlab`) now takes just 1.4% of the original time.
* **Consistent performance:** The improvement scales reliably across repository sizes.
* **Resource efficiency:** We significantly reduced server load during backup operations.
* **Broader applicability:** While backup creation sees the most dramatic improvement, all bundle-based operations that touch many references benefit.

## What this means for GitLab customers

For GitLab customers, this enhancement delivers immediate and tangible benefits for how organizations approach repository backup and disaster recovery planning:

* **Transformed backup strategies**
  * Enterprise teams can establish comprehensive nightly schedules without impacting development workflows or requiring extensive backup windows.
  * Backups can now run seamlessly in the background during nightly schedules, instead of requiring dedicated, lengthy windows.
* **Enhanced business continuity**
  * With backup times reduced from days to minutes, organizations significantly minimize their recovery point objectives (RPO). This translates to reduced business risk – in a disaster scenario, you're potentially recovering hours of work instead of days.
* **Reduced operational overhead**
  * Less server resource consumption and shorter maintenance windows.
  * Shorter backup windows mean reduced compute costs, especially in cloud environments, where extended processing time translates directly to higher bills.
* **Future-proofed infrastructure**
  * Growing repositories no longer force difficult choices between backup frequency and system performance.
  * As your codebase expands, your backup strategy can scale seamlessly alongside it.

Organizations can now implement more robust backup strategies without compromising on performance or completeness. What was once a challenging trade-off has become a straightforward operational practice.

Starting with the [GitLab 18.0](https://about.gitlab.com/releases/2025/05/15/gitlab-18-0-released/) release, all GitLab customers, regardless of their license tier, can take full advantage of these improvements for their [backup](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/) strategy and execution. No further configuration change is required.

## What's next

This breakthrough is part of our ongoing commitment to scalable, enterprise-grade Git infrastructure. While the improvement from 48 hours to 41 minutes in backup creation time represents a significant milestone, we continue to identify and address performance bottlenecks throughout our stack.

We're particularly proud that this enhancement was contributed upstream to the Git project, benefiting not just GitLab users but the broader Git community. This collaborative approach to development ensures that improvements are thoroughly reviewed, widely tested, and available to all.

> Deep infrastructure work like this is how we approach performance at GitLab. Join the GitLab 18 virtual launch event to see what other fundamental improvements we're shipping.
> [Register today!](https://about.gitlab.com/eighteen/)

---

# Tutorial: Secure BigQuery data publishing with GitLab

*Learn how to create repeatable, auditable, and efficient processes for automating and securing BigQuery data exports.*

**Author:** Regnard Raquedan · **Date:** 2025-03-25

GitLab offers a powerful solution for automating and securing [BigQuery](https://cloud.google.com/bigquery) data exports. This integration transforms manual exports into repeatable, auditable processes that can eliminate security vulnerabilities while saving valuable time. This tutorial explains how to implement this solution so you can quickly reduce manual operations, permission issues, and security concerns with just a few lines of GitLab YAML code.

Follow along with this step-by-step video:

<figure class="video_container">
  <iframe src="https://www.youtube.com/embed/gxXX-ItAreo?si=FijY9wMVppCW-18q" frameborder="0" allowfullscreen="true"></iframe>
</figure>

## The solution architecture

Our solution leverages GitLab CI/CD pipelines to automate the secure export of data from BigQuery to Google Cloud Storage. Here's the high-level architecture:

1. SQL code is stored and version-controlled in GitLab.
2. After code review and approval, the GitLab CI/CD pipeline executes the code.
3. The pipeline authenticates with Google Cloud.
4. SQL queries are executed against BigQuery.
5. Results are exported as CSV files to Google Cloud Storage.
6. Secure links to these files are provided for authorized consumption.

## Prerequisites

Before we begin, ensure you have:

* **Google Cloud APIs enabled:** BigQuery API and Cloud Storage API
* **Service account** with appropriate permissions:
  * BigQuery Job User
  * Storage Admin
  * **Note:** For this demo, we're using the service account approach for authentication, which is simpler to set up.
For production environments, you might consider using GitLab's identity and access management integration with Google Cloud. This integration leverages Workload Identity Federation, which provides enhanced security and is more suitable for enterprise customers and organizations.
* **GitLab project** ready to store your SQL code and pipeline configuration

## Step-by-step implementation

**1. Configure Google Cloud credentials.**

First, set up the necessary environment variables in your GitLab project:

- Go to your **GitLab project > Settings > CI/CD**.
- Expand the **Variables** section.
- Add the following variables:
   * `GCS_BUCKET`: Your Google Cloud Storage bucket name
   * `GCP_PROJECT_ID`: Your Google Cloud project ID
   * `GCP_SA_KEY`: Base64-encoded service account key (mark as masked) – you can produce this with, for example, `base64 -w 0 service-account-key.json`

**2. Create your SQL query.**

Create a file named `query.sql` in your GitLab repository with your BigQuery SQL query. The query looks like this:

```sql
-- This query shows a list of the daily top Google Search terms.
SELECT
   refresh_date AS Day,
   term AS Top_Term,
       -- These search terms are in the top 25 in the US each day.
   rank
FROM `bigquery-public-data.google_trends.top_terms`
WHERE
   rank = 1
       -- Choose only the top term each day.
   AND refresh_date >= DATE_SUB(CURRENT_DATE(), INTERVAL 2 WEEK)
       -- Filter to the last 2 weeks.
GROUP BY Day, Top_Term, rank
ORDER BY Day DESC
   -- Show the days in reverse chronological order.
```

This query returns the top Google Trends search term for each day over the last two weeks.

**3. Configure the GitLab CI/CD pipeline.**

Create a `.gitlab-ci.yml` file in your repository root:

```yaml
image: google/cloud-sdk:alpine

include:
  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml

execute:
  stage: deploy
  script:
    # Set up Google Cloud authentication and install necessary components
    - export GOOGLE_CLOUD_CREDENTIALS=$(echo $GCP_SA_KEY | base64 -d)
    - echo $GOOGLE_CLOUD_CREDENTIALS > service-account-key.json
    - gcloud auth activate-service-account --key-file service-account-key.json
    - gcloud components install gsutil
    # Set the active Google Cloud project
    - gcloud config set project $GCP_PROJECT_ID
    # Run the BigQuery query and export the results to a CSV file
    - bq query --format=csv --use_legacy_sql=false < query.sql > results.csv
    # Create a Google Cloud Storage bucket if it doesn't exist
    - gsutil ls gs://${GCS_BUCKET} || gsutil mb gs://${GCS_BUCKET}
    # Upload the CSV file to the storage bucket
    - gsutil cp results.csv gs://${GCS_BUCKET}/results.csv
    # Set the access control list (ACL) to make the CSV file publicly readable
    - gsutil acl ch -u AllUsers:R gs://${GCS_BUCKET}/results.csv
    # Define the static URL for the CSV file
    - export STATIC_URL="https://storage.googleapis.com/${GCS_BUCKET}/results.csv"
    # Display the static URL for the CSV file
    - echo "File URL = $STATIC_URL"
```

**4. Run the pipeline.**

Now, whenever changes are merged to your main branch, the pipeline will provide a link to the CSV file stored in the Google Cloud Storage bucket. This file contains the result of the executed SQL query, which GitLab subjects to security checks.
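
The final script line prints the file's public URL in the job log. Assuming a hypothetical bucket name of `my-gcs-bucket`, consuming the published export is a single request:

```shell
# Fetch the published CSV and preview the first rows.
curl -s "https://storage.googleapis.com/my-gcs-bucket/results.csv" | head
```
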
## Benefits of this approach

* **Security:** Authentication is handled automatically via service accounts (or Workload Identity Federation for enhanced security in production environments).
* **Auditability:** All data exports are tracked through GitLab commits and pipeline logs.
* **Repeatability:** A consistent, predictable export process on every run, which can also be scheduled.
* **Version control:** SQL queries are properly versioned and reviewed.
* **Automation:** Significantly fewer manual exports, reducing human error.

## Try it today

By combining GitLab's DevSecOps capabilities with Google Cloud's BigQuery and Cloud Storage, you've now automated and secured your data publishing workflow. This approach reduces manual operations, resolves permission headaches, and addresses security concerns – all achieved with just a few lines of GitLab CI code.

> Use this tutorial's [complete code example](https://gitlab.com/gitlab-partners-public/google-cloud/demos/big-query-data-publishing) to get started now.

---

# How we reduced MR review time with Value Stream Management

*The GitLab engineering team leverages VSM to pinpoint bottlenecks in the merge request review process and streamline software delivery. See how we do it and what we've learned.*

**Author:** Haim Snir · **Date:** 2025-02-20

At GitLab, we're passionate about using our own products internally, a.k.a. dogfooding. Dogfooding has led to significant improvements in accelerating our software delivery cycle time for customers. This article spotlights a specific use case where [GitLab Value Stream Management (VSM)](https://about.gitlab.com/solutions/value-stream-management/) has driven significant improvements for our engineering team.
You'll learn how VSM helped us tackle two critical challenges: measuring the journey from idea conception to merge request completion, and streamlining our deployment workflows.

## The Challenge: Identifying bottlenecks in MR reviews

Despite having well-defined workflows, one team noticed that MRs were taking longer than expected to be reviewed and merged. The challenge wasn't just about the delays themselves, but about understanding *where* in the review process these delays were happening and *why*.

Our team's goal was clear:

- Identify where time was being spent from the initial idea to the final merge of an MR.
- Pinpoint specific bottlenecks in the review process.
- Understand how MR size, complexity, or documentation quality affect review time.

## The Approach: Measuring MR review time in GitLab Value Stream Analytics

Value Stream Analytics (VSA) enables organizations to map their entire workflow from idea to delivery, distinguishing between value-adding activities (VA) and non-value-adding activities (NVA) in the process flow. By calculating the ratio of value-added time to total lead time, a team can identify the wasteful activities that cause delays in MR reviews.

To obtain the necessary metrics, the team customized GitLab VSA to gain better visibility into our MR review process.

### 1. Setting up a custom stage for MR review

The team added a [new custom stage](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events) in VSA called **Review Time to Merge** to specifically track the time from when a reviewer was first assigned to when the MR was merged.

* Start event: MR first reviewer assigned
* End event: MR merged

By defining this stage, VSA began measuring the duration of the MR review process, giving us precise data on where time was being spent.

![Defining stage of VSA](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097884/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097883929.png)

### 2. Using the Total Time Chart for clarity

With the custom stage in place, the team used the [**Total Time Chart** on the VSA Overview page](https://about.gitlab.com/blog/value-stream-total-time-chart/) (**Analyze > Value Stream**) to visualize how much time was spent during the new MR Review stage. By comparing the values represented by each area on the chart, the team could quickly identify how this stage contributed to the total software delivery lifecycle (SDLC) time.

![Total Time Chart for VSA](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097884/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097883930.png)

### 3. Drilling down for deeper insights

To investigate specific delays, the team used the **Stage Navigation Bar** to dive deeper into the MR Review stage. This view allowed them to:

- Sort MRs by review time: The stage table showed all related MRs, sorted by review duration, making it easy to detect slow MRs.
- Analyze individual MRs: For each MR, the team could examine factors such as reviewer assignment delays, multiple rounds of feedback, idle time after approval, and MR size/complexity.

## The outcome: Actionable insights and improvements

By customizing VSA to track [MR review time](https://docs.gitlab.com/user/project/merge_requests/reviews/), the team uncovered several key insights:

- **Delays in reviewer assignment:** Some MRs experienced delays because reviewers were assigned late, or reviewers had too many MRs in their queue.
- **Slow review start times:** Even after assignment, certain MRs sat idle before reviews began, often due to context switching or competing priorities.
- **Multiple feedback loops:** Larger MRs often required multiple rounds of feedback, which extended review time significantly.
- **Idle time post-approval:** Some MRs were approved but not merged promptly, often due to deployment coordination issues.

For the engineering manager on the team, VSA proved to be transformational in managing their team's workflow: *"I've used the VSA to justify where we were spending time in MR completion. We have VSA customized to our needs, and it's been very beneficial to our investigations for opportunities for improvements."*

Also, from this dogfooding experience, we're now developing a key enhancement to improve visibility into the review process. We're adding a new event to VSA – [Merge request last approved at](https://gitlab.com/gitlab-org/gitlab/-/issues/503754) – which creates a stage that breaks down MR review steps even further for granular visibility.

## The power of data-driven decisions

By leveraging GitLab's VSA, we didn't just identify bottlenecks – we gained actionable insights that led to measurable improvements in MR review time and overall developer productivity. We optimized merge request review cycles and increased developer throughput, validating our commitment to continuous improvement through measurement.

> Want to learn more about how VSA can help your team? [Start a free, 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/), customize your value streams, and see how you can make improvements throughout the SDLC for your teams.
> Then, make sure to [share your feedback and experiences in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/520962).

## Read more

- [Optimize value stream efficiency to do more with less, faster](https://about.gitlab.com/the-source/platform/optimize-value-stream-efficiency-to-do-more-with-less-faster/)
- [New Scheduled Reports Generation tool simplifies value stream management](https://about.gitlab.com/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management/)
- [Value stream analytics documentation](https://docs.gitlab.com/user/group/value_stream_analytics/)
- [Value stream management: Total Time Chart simplifies top-down optimization flow](https://about.gitlab.com/blog/value-stream-total-time-chart/)

---

# Automating container image migration from Amazon ECR to GitLab

*When platform teams move their CI/CD to GitLab, migrating container images shouldn't be the bottleneck. Follow this step-by-step guide to automate the pipeline migration process.*

**Author:** Tim Rizzi · **Date:** 2025-02-13

"We need to migrate hundreds of container images from Amazon Elastic Container Registry (ECR) to GitLab. Can you help?" This question kept coming up in conversations with platform engineers. They were modernizing their DevSecOps toolchain with GitLab but got stuck when faced with moving their container images. While each image transfer is simple, the sheer volume made it daunting.

One platform engineer put it perfectly: "I know exactly what needs to be done – pull, retag, push. But I have 200 microservices, each with multiple tags. I can't justify spending weeks on this migration when I have critical infrastructure work."

## The challenge

That conversation sparked an idea. What if we could automate the entire process? When platform teams move their [CI/CD](https://about.gitlab.com/topics/ci-cd/) to GitLab, migrating container images shouldn't be the bottleneck. The manual process is straightforward but repetitive – pull each image, retag it, and push it to GitLab's Container Registry. Multiply this by dozens of repositories and multiple tags per image, and you're looking at days or weeks of tedious work.
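
For a single image, that manual routine looks like the following; the account ID, region, and GitLab project path are hypothetical placeholders:

```shell
# Pull from ECR, retag for the GitLab Container Registry, and push.
docker pull 123456789012.dkr.ecr.us-east-1.amazonaws.com/my-service:1.2.3
docker tag 123456789012.dkr.ecr.us-east-1.amazonaws.com/my-service:1.2.3 registry.gitlab.com/my-group/my-project/my-service:1.2.3
docker push registry.gitlab.com/my-group/my-project/my-service:1.2.3
```
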
The manual process is straightforward but repetitive – pull each image, retag it, and push it to GitLab's Container Registry. Multiply this by dozens of repositories and multiple tags per image, and you're looking at days or weeks of tedious work.\n\n## The solution\n\nWe set out to create a GitLab pipeline that would automatically do all this heavy lifting. The goal was simple: Give platform engineers a tool they could set up in minutes and let run overnight, waking up to find all their images migrated successfully.\n\n### Setting up access\n\nFirst things first – security. We wanted to ensure teams could run this migration with minimal AWS permissions. Here's the read-only identity and access management (IAM) policy you'll need:\n\n```json\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"ecr:GetAuthorizationToken\",\n                \"ecr:BatchCheckLayerAvailability\",\n                \"ecr:GetDownloadUrlForLayer\",\n                \"ecr:DescribeRepositories\",\n                \"ecr:ListImages\",\n                \"ecr:DescribeImages\",\n                \"ecr:BatchGetImage\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n```\n\n### GitLab configuration\n\nWith security handled, the next step is setting up GitLab. We kept this minimal – you'll need to configure these variables in your CI/CD settings:\n\n```\nAWS_ACCOUNT_ID: Your AWS account number\nAWS_DEFAULT_REGION: Your ECR region\nAWS_ACCESS_KEY_ID: [Masked]\nAWS_SECRET_ACCESS_KEY: [Masked]\nBULK_MIGRATE: true\n```\n\n### The migration pipeline\n\nNow for the interesting part. We built the pipeline using Docker-in-Docker to handle all the image operations reliably:\n\n```yaml\nimage: docker:20.10\nservices:\n  - docker:20.10-dind\n\nbefore_script:\n  - apk add --no-cache aws-cli jq\n  - aws sts get-caller-identity\n  - aws ecr get-login-password | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com\n  - docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY}\n```\n\nThe pipeline works in three phases, each building on the last:\n\n1. Discovery\n\nFirst, it finds all your repositories:\n\n```bash\nREPOS=$(aws ecr describe-repositories --query 'repositories[*].repositoryName' --output text)\n```\n\n2. Tag enumeration\n\nThen, for each repository, it gets all the tags:\n\n```bash\nTAGS=$(aws ecr describe-images --repository-name $repo --query 'imageDetails[*].imageTags[]' --output text)\n```\n\n3. Transfer\n\nFinally, it handles the actual migration:\n\n```bash\ndocker pull ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag}\ndocker tag ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag} ${CI_REGISTRY_IMAGE}/${repo}:${tag}\ndocker push ${CI_REGISTRY_IMAGE}/${repo}:${tag}\n```
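\n\nPutting the three phases together: conceptually, the job boils down to a nested loop over repositories and tags. Here's a condensed sketch of that loop (our simplified illustration – the published pipeline adds error handling and progress logging on top):\n\n```bash\nECR_REGISTRY="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com"\n\n# Phase 1: discover every repository\nREPOS=$(aws ecr describe-repositories --query 'repositories[*].repositoryName' --output text)\nfor repo in $REPOS; do\n  # Phase 2: enumerate every tag in the repository\n  TAGS=$(aws ecr describe-images --repository-name "$repo" --query 'imageDetails[*].imageTags[]' --output text)\n  for tag in $TAGS; do\n    # Phase 3: pull from ECR, retag, and push to the GitLab Container Registry\n    docker pull "${ECR_REGISTRY}/${repo}:${tag}"\n    docker tag "${ECR_REGISTRY}/${repo}:${tag}" "${CI_REGISTRY_IMAGE}/${repo}:${tag}"\n    docker push "${CI_REGISTRY_IMAGE}/${repo}:${tag}"\n    # Remove local copies to reclaim runner disk space between transfers\n    docker rmi "${ECR_REGISTRY}/${repo}:${tag}" "${CI_REGISTRY_IMAGE}/${repo}:${tag}" || true\n  done\ndone\n```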
\n\n## What you get\n\nRemember that platform engineer who didn't want to spend weeks on migration? Here's what this solution delivers:\n\n- automated discovery and migration of all repositories and tags\n- consistent image naming between ECR and GitLab\n- error handling for failed transfers\n- clear logging for tracking progress\n\nInstead of writing scripts and babysitting the migration, the platform engineer could focus on more valuable work.\n\n## Usage\n\nGetting started is straightforward:\n\n1. Copy the `.gitlab-ci.yml` to your repository.\n2. Configure the AWS and GitLab variables.\n3. Set `BULK_MIGRATE` to \"true\" to start the migration.\n\n## Best practices\n\nThrough helping teams with their migrations, we've learned a few things:\n\n- Run during off-peak hours to minimize the impact on your team.\n- Keep an eye on the pipeline logs – they'll tell you if anything needs attention.\n- Don't decommission ECR until you've verified all images transferred successfully.\n- For very large migrations, consider adding rate limiting to avoid overwhelming your network (one simple approach is sketched below).
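\n\nRate limiting doesn't need to be fancy. A simple pause between pushes is often enough – a hypothetical tweak to the transfer loop, not a feature of the published pipeline:\n\n```bash\n# Hypothetical throttle: pause between transfers so the migration\n# doesn't saturate your network or trip registry rate limits\nTHROTTLE_SECONDS="${THROTTLE_SECONDS:-5}"\ndocker push "${CI_REGISTRY_IMAGE}/${repo}:${tag}"\nsleep "$THROTTLE_SECONDS"\n```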
\n\nWe've open-sourced this pipeline in our public GitLab repository because we believe platform engineers should spend time building valuable infrastructure, not copying container images. Feel free to adapt it for your needs or ask questions about implementation.\n\n> #### Get started with this and other package components with our [CI/CD Catalog documentation](https://gitlab.com/explore/catalog/components/package).",[110,771,726,480,675,750],"AWS",{"slug":773,"featured":92,"template":678},"automating-container-image-migration-from-amazon-ecr-to-gitlab","content:en-us:blog:automating-container-image-migration-from-amazon-ecr-to-gitlab.yml","Automating Container Image Migration From Amazon Ecr To Gitlab","en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab.yml","en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab",{"_path":779,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":780,"content":786,"config":793,"_id":795,"_type":16,"title":796,"_source":17,"_file":797,"_stem":798,"_extension":20},"/en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration",{"title":781,"description":782,"ogTitle":781,"ogDescription":782,"noIndex":6,"ogImage":783,"ogUrl":784,"ogSiteName":692,"ogType":693,"canonicalUrls":784,"schema":785},"Deploy a NodeJS Express app with GitLab's Cloud Run integration","This tutorial will show you how to use NodeJS and Express to deploy an application to Google Cloud. This step-by-step guide will have you up and running in less than 10 minutes with the Cloud Run integration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097892/Blog/Hero%20Images/Blog/Hero%20Images/speedlights_speedlights.png_1750097891963.png","https://about.gitlab.com/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Deploy a NodeJS Express app with GitLab's Cloud Run integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Matthies\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2025-01-13\",\n      }",{"title":781,"description":782,"authors":787,"heroImage":783,"date":790,"body":791,"category":14,"tags":792},[788,789],"Sarah Matthies","Noah Ing","2025-01-13","Are you looking to deploy your NodeJS app to Google Cloud with the least maintenance possible? This tutorial will show you how to utilize GitLab’s Google Cloud integration to deploy your NodeJS app in less than 10 minutes.\n\nTraditionally, deploying an application often requires assistance from production or DevOps engineers. This integration now empowers developers to handle deployments independently. Whether you’re a solo developer or part of a large team, this setup gives everyone the ability to deploy their applications efficiently.\n\n## Overview\n\n- Create a new project in GitLab\n- Set up your NodeJS application\n- Use the Google Cloud integration to create a Service account\n- Use the Google Cloud integration to configure Cloud Run via Merge Request\n- Enjoy your newly deployed NodeJS app\n- Follow the cleanup guide\n\n## Prerequisites\n- Owner access on a Google Cloud Platform project\n- Working knowledge of JavaScript/TypeScript (not playing favorites here!)\n- Working knowledge of GitLab CI\n- 10 minutes \n\n## Step-by-step guide\n\n### 1. Create a new project in GitLab\n\nWe decided to call our project `nodejs-express-cloud-run` for simplicity.\n\n![Create a new project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097905106.png)\n\n### 2. Upload your NodeJS app or use this example to get started.\n\n[Demo](https://gitlab.com/demos/templates/nodejs-cloud-run)\n\n**Note:** Make sure to include the `cloud-run` [CI template](https://gitlab.com/gitlab-org/incubation-engineering/five-minute-production/library/-/raw/main/gcp/cloud-run.gitlab-ci.yml) within your project, as shown in the screenshot and the snippet below.\n\n![cloud-run CI template include](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097905107.png)
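\n\nIf you're wiring the template in by hand, the include boils down to a single snippet – here in the remote-include form, pointing at the raw template URL above:\n\n```yaml\ninclude:\n  # Pull in the cloud-run deployment template\n  - remote: 'https://gitlab.com/gitlab-org/incubation-engineering/five-minute-production/library/-/raw/main/gcp/cloud-run.gitlab-ci.yml'\n```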
\n\n### 3. Use the Google Cloud integration to create a Service account.\n\nNavigate to __Operate > Google Cloud > Create Service account__.\n\n![Create Service account screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750097905109.png)\n\nAlso configure the region you would like the Cloud Run instance deployed to.\n\n![Cloud Run instance deployment region selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097905113.png)\n\n### 4. Go to the Deployments tab and use the Google Cloud integration to configure __Cloud Run via Merge Request__.\n\n![Deployments - Configuration of Cloud Run via Merge Request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097905115.png)\n\nThis will open a merge request – immediately merge it.\n\n![Merge request for deployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097905117.png)\n\n__Note:__ `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, and `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps.\n\n![Variables listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097905118.png)\n\n### 5. Voila! Check your pipeline and you will see you have successfully deployed to Google Cloud Run using GitLab CI.\n\n![Successful deployment to Google Cloud Run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097905119.png)\n\nClick the Service URL to view your newly deployed Node server.\n\n![View newly deployed Node server](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097905120.png)\n\nIn addition, you can navigate to __Operate > Environments__ to see a list of deployments for your environments.\n\n![Environments view of deployment list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750097905121.png)\n\nBy clicking on the environment called `main`, you’ll be able to view a complete list of deployments specific to that environment.\n\n![Main view of deployments to specific environment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097905122.png)\n\n### 6. Next steps\n\nTo get started with developing your Node application, try adding another endpoint. For instance, in your `index.js` file, you can add a **/bye** endpoint as shown below:\n\n```javascript\napp.get('/bye', (req, res) => {\n  res.send(`Have a great day! See you!`);\n});\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once it’s complete, go back to the Service URL and navigate to the **/bye** endpoint to see the new functionality in action.\n\n![Bye message](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097905123.png)\n\n## Follow the cleanup guide\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. For detailed instructions, refer to the [cleanup guide here](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> Read more of these helpful [tutorials from GitLab solutions architects](https://about.gitlab.com/blog/tags/solutions-architecture/).\n",[110,728,232,750,726],{"slug":794,"featured":92,"template":678},"deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration","content:en-us:blog:deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","Deploy A Nodejs Express App With Gitlabs Cloud Run Integration","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration",{"_path":800,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":801,"content":807,"config":813,"_id":815,"_type":16,"title":816,"_source":17,"_file":817,"_stem":818,"_extension":20},"/en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration",{"title":802,"description":803,"ogTitle":802,"ogDescription":803,"noIndex":6,"ogImage":804,"ogUrl":805,"ogSiteName":692,"ogType":693,"canonicalUrls":805,"schema":806},"How to deploy a PHP app using GitLab's Cloud Run integration","Are you using PHP and want an easy way to deploy your application to Google Cloud? 
Follow this guide to deploy your app with Google Cloud Run in under 10 minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098264/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_519147119_2RafH61mqosMZv8HGAlsUj_1750098264407.jpg","https://about.gitlab.com/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy a PHP app using GitLab's Cloud Run integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Nnachi\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-12-10\",\n      }",{"title":802,"description":803,"authors":808,"heroImage":804,"date":810,"body":811,"category":14,"tags":812},[809,789],"Christian Nnachi","2024-12-10","Writing PHP application code and ensuring the application is running smoothly in production are often two different skill sets owned by two different engineers. GitLab aims to bridge the gap by enabling the engineer who has written the PHP application code to also deploy it into Google Cloud Platform with little effort. \n\nWhether you own event-driven, long-running services or deploy containerized jobs to process data, Google Cloud Run automatically scales your containers up and down from zero — this means you only pay when your code is running.\n\nIf you are a PHP developer who would like to deploy your application with minimal effort to Google Cloud Platform, this guide will show you how using the GitLab Google Cloud Run integration. \n\n## Overview\n\n- Create a new project in GitLab\n- Set up your PHP application\n- Utilizing the Google Cloud integration, create a Service account\n- Utilizing the Google Cloud integration, configure Cloud Run via merge request\n- Try adding another endpoint\n- Clean up\n\n## Prerequisites\n- Owner access on a Google Cloud Platform project\n- Working knowledge of [PHP](https://www.php.net/manual/en/introduction.php), an open-source, general-purpose scripting language\n- Working knowledge of [GitLab CI](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-integration-ci)\n- 10 minutes\n\n## 1. Create a new project in GitLab.\n\nWe decided to call our project `PHP cloud-run` for simplicity.\n\n![PHP cloud-run project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098287/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098287615.png)\n\nThen, create an `index.php` app ([see the demo file](https://gitlab.com/demos/templates/php-cloud-run/-/blob/main/index.php)).\n\n```php\n\u003C?php\n\n$name = getenv('NAME', true) ?: 'World';\necho sprintf('Hello %s!', $name);\n```
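\n\nIf you'd like to smoke-test the app before deploying anything (optional, assuming PHP is installed locally), the built-in web server is enough:\n\n```bash\n# Serve the current directory; index.php is picked up by default\nNAME=GitLab php -S localhost:8080\n# In another terminal:\ncurl http://localhost:8080/\n# -> Hello GitLab!\n```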
\n\n## 2. Utilizing the Google Cloud integration, create a Service account.\n\nNavigate to **Operate > Google Cloud > Create Service account**. \n\n![Create Service account screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098287616.png)\n\nThen configure the region you would like the Cloud Run instance deployed to.\n\n![Configure region screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098287618.png)\n\n## 3. Utilizing the Google Cloud integration, configure **Cloud Run via merge request**.\n\n![Deployment configuration screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098287620.png)\n\nThis will open a merge request. Immediately merge it.\n\n![Enable Deployments to Cloud run screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098287622.png)\n\n**Note:** `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, and `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps.\n\n![Variables screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098287624.png)\n\nCheck your pipeline and you will see you have successfully deployed to Google Cloud Run utilizing GitLab CI.\n\n![merge branch screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098287625.png)\n\n![Google Cloud Run deployed with GitLab CI](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098287627.png)\n\n## 4. Click the **Service URL** to view your newly deployed PHP app.\n\nIn addition, you can navigate to **Operate > Environments** to see a list of deployments for your environments.\n\n![Environments screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098287628.png)\n\nBy clicking on the environment called **main**, you’ll be able to view a complete list of deployments specific to that environment.\n\n![Main environment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098287631.png)\n\n## 5. Add another endpoint\n\nTo get started with developing your PHP application, try adding another endpoint. For example, in your main file, you can add a `/bye` endpoint like this:\n\n```php\n\u003C?php\n\n$name = getenv('NAME', true) ?: 'World';\n\nif ($_SERVER['REQUEST_URI'] == '/bye') {\n    echo sprintf('Goodbye %s!', $name);\n} else {\n    echo sprintf('Hello %s!', $name);\n}\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once the job is complete, go back to the Service URL and navigate to the `/bye` endpoint to see the new functionality in action.\n\n## Clean up\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. 
For detailed instructions, refer to the [cleanup guide here](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> Check out more [easy-to-follow tutorials from our Solutions Architecture team](https://about.gitlab.com/blog/tags/solutions-architecture/).",[750,726,728,232],{"slug":814,"featured":6,"template":678},"how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration","content:en-us:blog:how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration.yml","How To Deploy A Php App Using Gitlabs Cloud Run Integration","en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration.yml","en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration",{"_path":820,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":821,"content":827,"config":833,"_id":835,"_type":16,"title":836,"_source":17,"_file":837,"_stem":838,"_extension":20},"/en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"title":822,"description":823,"ogTitle":822,"ogDescription":823,"noIndex":6,"ogImage":824,"ogUrl":825,"ogSiteName":692,"ogType":693,"canonicalUrls":825,"schema":826},"Provision group runners with Google Cloud Platform and GitLab CI","This tutorial will teach you how to set up a new group runner on GitLab.com using Google Cloud Platform in less than 10 minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098300/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_623844718_4E5Fx1Q0DHikigzCsQWhOG_1750098300048.jpg","https://about.gitlab.com/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Provision group runners with Google Cloud Platform and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Matthies\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-11-19\",\n      }",{"title":822,"description":823,"authors":828,"heroImage":824,"date":829,"body":830,"category":14,"tags":831},[788,789],"2024-11-19","Are you interested in hosting your own servers to run your GitLab CI/CD pipelines but don’t know where to begin? Setting up a GitLab Runner to run your pipelines on your own infrastructure can seem like a daunting task, as it requires infrastructure knowledge and the know-how to maintain that infrastructure. Typically, this process involves provisioning infrastructure, installing dependencies, and testing that everything works with your GitLab instance.\n\nThis article highlights how easy it is to spin up a GitLab Runner of your own utilizing GitLab’s Google Cloud integration. This tutorial will teach you how to set up a new group runner on GitLab.com using Google Cloud Platform in less than 10 minutes!\n\nYou will learn how to:\n\n- Create a new group runner.\n- Configure the new group runner’s tags and description.\n- Register the new group runner by adding configurations.\n- Provision the GitLab Runner utilizing `gcloud cli` and Terraform.\n- Have your GitLab Runner pick up its first GitLab CI job.\n\n## Prerequisites\n- A terminal with Bash installed\n- Owner access on a Google Cloud Platform project\n- Terraform (or OpenTofu) [Version 1.5](https://releases.hashicorp.com/terraform/1.5.7/) or greater \n- [gcloud CLI](https://cloud.google.com/sdk/docs/install) \n- 10 minutes\n\n## Tutorial\n1. 
Create a new group runner under __Build > Runners > New Group Runner__.\n\n__Note:__ Navigate to the group level.\n\n![GitLab Runner setup screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098317126.png)\n\n2. Configure the new group runner's tags, description, and any additional configurations.\n\n![New Group Runner setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098317127.png)\n\n3. Select __Google Cloud__.\n\n![Select Google Cloud screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098317129.png)\n\n4. Copy your project ID from Google Cloud Platform.\n\n![Copy project ID from GCP screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098317131.png)\n\n5. Fill out your Google Cloud project ID and choose a region, zone, and type of machine you want to use.\n\n![Screen to fill out Google Cloud information](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098317132.png)\n\n6\\. Once this information is filled out, click **Setup instructions**.\n\nRun the bash script provided in Step 1 above.\n\n**Note:** This script was saved to a file called `setup.sh` for ease of use. You can also paste it straight into your terminal if you are comfortable debugging it.\n\n![Setup instructions screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098317134.png)\n\n![Script for GitLab Runner](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098317135.png)\n\n7\\. Create a `main.tf` file and follow the instructions in GitLab.\n\n**Note:** If you want to use OpenTofu instead of Terraform, you can still copy the code and only have to adjust the Terraform commands for applying the configuration. \n\n![Install and register GitLab Runner screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098317136.png)\n\nOnce successfully provisioned, you should see the following:\n\n![GitLab Runner code](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098317137.png)\n\n8\\. If you close the instructions and click the **View runners** button, you will now have a newly provisioned runner present with \"Never contacted\" as its status.\n\n![Newly provisioned runner on screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098317139.png)\n\n9\\. In any project, add the following `.gitlab-ci.yml`.\n\n```yaml\nstages:\n  - greet\n\nhello_job:\n  stage: greet\n  tags:\n    - gcp-runner\n  script:\n    - echo \"hello\"\n```\n\nVoila! You have set up your first GitLab Runner utilizing Google Cloud Platform.\n\n## Next steps\n\nNow that you have provisioned your very own GitLab Runner, consider optimizing it for your specific use case. Some things to consider with your runner moving forward:\n\n- Is the runner I provisioned the right size? 
Does it need additional resources for my use case? \n- Does the GitLab Runner contain all the dependencies my builds need?  \n- How can I store the GitLab Runner as infrastructure as code?\n\n> Make sure to bookmark the [Provisioning runners in Google Cloud documentation](https://docs.gitlab.com/ee/ci/runners/provision_runners_google_cloud.html) for easy reference.\n",[726,480,832,110,750,728,232],"CI",{"slug":834,"featured":6,"template":678},"provision-group-runners-with-google-cloud-platform-and-gitlab-ci","content:en-us:blog:provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","Provision Group Runners With Google Cloud Platform And Gitlab Ci","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"_path":840,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":841,"content":847,"config":853,"_id":855,"_type":16,"title":856,"_source":17,"_file":857,"_stem":858,"_extension":20},"/en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component",{"title":842,"description":843,"ogTitle":842,"ogDescription":843,"noIndex":6,"ogImage":844,"ogUrl":845,"ogSiteName":692,"ogType":693,"canonicalUrls":845,"schema":846},"Tutorial: How to set up your first GitLab CI/CD component","Use Python scripts in your GitLab CI/CD pipelines to improve usability. In this step-by-step guide, you'll learn how to get started building your own CI/CD component.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098410/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2826%29_3lH4gZFVIGCndksN6Rlg85_1750098409928.png","https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: How to set up your first GitLab CI/CD component\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sophia Manicor\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-11-12\",\n      }",{"title":842,"description":843,"authors":848,"heroImage":844,"date":850,"body":851,"category":14,"tags":852},[849,789],"Sophia Manicor","2024-11-12","Do you use Python scripts in your GitLab CI pipelines? Do you want to create pipelines at scale? This tutorial shows how to set up your first [GitLab CI/CD component](https://docs.gitlab.com/ee/ci/components/) to deploy Python scripts. \n\nA [CI/CD component is a reusable single pipeline configuration unit](https://about.gitlab.com/blog/introducing-ci-components/). Use components to create a small part of a larger pipeline, or even to compose a complete pipeline configuration.\n\n## Prerequisites\n- Basic Python knowledge\n- Working knowledge of GitLab CI\n- 8 minutes\n\n## Python script \n\n* **[The demo Python script](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/src/script.py?ref_type=heads)**\n\nThis Python script utilizes a library called [ArgParse](https://docs.python.org/3/library/argparse.html). ArgParse allows you to pass variables to a script through the command line. This script takes in three arguments:\n- [Python_container_image](https://docs.gitlab.com/ee/ci/yaml/#image): This is the Python container image you wish to use.\n- [Stage](https://docs.gitlab.com/ee/ci/yaml/#stage): This is the GitLab CI stage in which your job will run.
\n- Name: This is your name.\n\n```python\nimport argparse\n\nparser = argparse.ArgumentParser(description='Python CICD Component Boilerplate')\nparser.add_argument('python_container_image', type=str, help='python:3.10-slim')\nparser.add_argument('stage', type=str, help='Build')\nparser.add_argument('persons_name', type=str, help='Noah')\nargs = parser.parse_args()\n\npython_container_image = args.python_container_image\nstage = args.stage\npersons_name = args.persons_name\n```\n\nThis will take in these three variables and print out simple statements:\n\n```python\nprint(\"You have chosen \" + python_container_image + \" as the container image\")\nprint(\"You have chosen \" + stage + \" as the stage to run this job\")\nprint(\"Thank you \" + persons_name + \"! You are successfully using GitLab CI with a Python script.\")\n```\n\nTo test this script locally, you can call on the script by utilizing the following command:\n\n```bash\npython3 src/script.py python_container_image stage name\n```\n\nModify this script accordingly if you’d like to add in your own arguments!\n\n## Template \n\n* **[Demo of template](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/templates/template.yml?ref_type=heads)**\n\n**Note:** As long as the template's YAML file is placed in the `templates/` directory, the CI/CD component will know to pick it up. We named our template `template.yml`, but any name would work for this YAML file.\n\nNow, getting into the fun part of CI/CD components: inputs! [Inputs](https://docs.gitlab.com/ee/ci/yaml/inputs.html) allow you to pass variables into your pipeline. \n\n```yml\nspec:\n  inputs:\n    python_container_image:\n      default: python:3.10-slim\n      description: \"Define any python container image\"\n    stage:\n      default: build\n      description: \"Define the stage this job will run in\"\n    persons_name:\n      default: Noah\n      description: \"Put your name here\"\n```\nHere we have defined the three inputs that are our arguments in our Python script. You can see that for each input we have added a default value – this is what the input is set to if not overridden. If we took out the default keyword, the input would become mandatory when we use our component. As written, adding these inputs when we use our component is optional thanks to our default values.\n\nWe can also set descriptions to ensure that other developers understand what to input when they use our component. Descriptions are optional, but they provide self-documentation within the code itself, which is always nice.
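\n\nFor example, if we wanted callers to always supply a name explicitly, we could simply drop the default – a hypothetical variant of the spec above:\n\n```yml\nspec:\n  inputs:\n    persons_name:\n      # No default, so every pipeline that includes this component\n      # must now set persons_name explicitly\n      description: \"Put your name here (required)\"\n```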
\n\nAfter we set up our inputs, let’s write the rest of our component:\n\n```yml\ncomponent:\n  image: $[[ inputs.python_container_image ]]\n  stage: $[[ inputs.stage ]]\n  before_script:\n    - pip3 install -r src/requirements.txt\n  script: python3 src/script.py $[[ inputs.python_container_image ]] $[[ inputs.stage ]] $[[ inputs.persons_name ]]\n```\n\nTo use inputs in our component, we need to use the syntax `$[[ inputs.$VARIABLE ]]`. In the above code, you can see that we use inputs to define our image and stage with `$[[ inputs.python_container_image ]]` and `$[[ inputs.stage ]]`.\n\n```yml\nscript: python3 src/script.py $[[ inputs.python_container_image ]] $[[ inputs.stage ]] $[[ inputs.persons_name ]]\n```\nDiving into the script section, you can see we call upon our Python script. We are able to pass our inputs in with the help of ArgParse.\n\nNow that you have reviewed how the Python script works and the template has been set up, it is time to use the component!\n\n## Using the component \n\n* **[A demo of including the component](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/.gitlab-ci.yml?ref_type=heads)**\n\nTo use the CI/CD component we just created, we need to include it in the `.gitlab-ci.yml` file at the root of our directory. \n\n```yml\ninclude:\n  # include the component located in the current project from the current SHA\n  - component: $CI_SERVER_FQDN/$CI_PROJECT_PATH/template@$CI_COMMIT_SHA\n    inputs:\n      python_container_image: python:3.11-slim\n      stage: test\n      persons_name: Tanuki\n```\n\nOne way to include it is to call upon it locally in the current project from the current commit SHA. You can find other ways to [reference a component in our documentation](https://docs.gitlab.com/ee/ci/components/#use-a-component).\n\nTo override the defaults, we have passed in other inputs so we get the correct image, stage, and name for our job. \n\nTry changing `persons_name` to your own name and watch the pipeline run!\n\n![ci/cd component tutorial - pipeline running](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098419/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098418901.png)\n\nVoila! You have learned how to set up a basic CI/CD component utilizing a Python ArgParse script!\n\n## What's next?\nIn the Python script, there is a commented-out GitLab Python library and OS library. If you would like to interact with the GitLab API, you can uncomment these and add a [GitLab personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) to the [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) named `GLPAT`.\n\n```python\nimport gitlab\nimport os\n```\nAfterwards, you can interact with the GitLab API.\n\n```python\nglpat = os.environ['GLPAT']\n\ngl = gitlab.Gitlab(private_token=glpat)\n# SELF_HOSTED gl = gitlab.Gitlab(url='https://gitlab.example.com', private_token='xxxxxxxxxxxxxx')\ntry:\n    projects = gl.projects.list(get_all=True)\n    print(projects)\nexcept Exception as error:\n    print(\"Error:\", error)\n```\n\n> Learn more about CI/CD components and how to avoid building pipelines from scratch with the [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/). 
\n\n## Read more\n\n- [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n- [Introducing CI/CD Steps, a programming language for DevSecOps automation](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/)\n- [A CI/CD component builder's journey](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n",[110,726,725,750],{"slug":854,"featured":92,"template":678},"tutorial-how-to-set-up-your-first-gitlab-ci-cd-component","content:en-us:blog:tutorial-how-to-set-up-your-first-gitlab-ci-cd-component.yml","Tutorial How To Set Up Your First Gitlab Ci Cd Component","en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component.yml","en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component",{"_path":860,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":861,"content":867,"config":874,"_id":876,"_type":16,"title":877,"_source":17,"_file":878,"_stem":879,"_extension":20},"/en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"title":862,"description":863,"ogTitle":862,"ogDescription":863,"noIndex":6,"ogImage":864,"ogUrl":865,"ogSiteName":692,"ogType":693,"canonicalUrls":865,"schema":866},"Fast Python Flask server deployment with GitLab + Google Cloud","This tutorial shows how to use GitLab’s Google Cloud integration to deploy a Python Flask server in less than 10 minutes, helping developers become more independent and efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098427/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750098427691.png","https://about.gitlab.com/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Fast Python Flask server deployment with GitLab + Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Jerez Solis\"}],\n        \"datePublished\": \"2024-11-04\",\n      }",{"title":862,"description":863,"authors":868,"heroImage":864,"date":870,"body":871,"category":14,"tags":872},[789,869],"Jerez Solis","2024-11-04","Deploying an application to the cloud often requires assistance from production or DevOps engineers. GitLab's Google Cloud integration empowers developers to handle deployments independently. In this tutorial, you'll learn how to deploy a Python Flask server to Google Cloud in less than 10 minutes. Whether you’re a solo developer or part of a large team, this setup allows you to deploy applications efficiently.\n\nYou'll learn how to:\n\n- Create a new project in GitLab\n- Create a Flask server utilizing `main.py`\n- Utilize the Google Cloud integration to create a Service account\n- Utilize the Google Cloud integration to create Cloud Run via a merge request\n- Access your newly deployed Flask server\n- Clean up your environment\n\n## Prerequisites:\n- Owner access on a Google Cloud Platform project\n- Working knowledge of Python\n- Working knowledge of GitLab CI\n- 10 minutes\n\n## Step-by-step Python Flask server deployment to Google Cloud\n\n**1. Create a new project in GitLab.**\n\nWe decided to call our project \"python-flask-cloud-run\" for simplicity.\n\n![python flask server - create a new project in GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098438036.png)\n\n**2. 
Create a Flask server utilizing the `main.py` demo.**\n\nFind the `main.py` demo here: [https://gitlab.com/demos/applications/python-flask-cloud-run](https://gitlab.com/demos/applications/python-flask-cloud-run).\n\n```python\nimport os\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello_world():\n    \"\"\"Example Hello World route.\"\"\"\n    name = os.environ.get(\"NAME\", \"World\")\n    return f\"Hello {name}!\"\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n```
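\n\nBefore wiring up the deployment, you can optionally run the server locally to confirm it works (assuming Python and Flask are installed):\n\n```bash\npip install Flask\npython main.py\n# In another terminal:\ncurl http://localhost:8080/\n# -> Hello World!\n```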
\n\n**3. Create a `requirements.txt` with the following dependencies.**\n\n```\nFlask==3.0.3\ngunicorn==22.0.0\nWerkzeug==3.0.3\n```\n\n**4. Utilizing the Google Cloud integration, create a Service account.**\n\nNavigate to **Operate > Google Cloud > Create Service account**.\n\n![python flask server - create service account](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098438037.png)\n\n**5. Also configure the region you would like the Cloud Run instance to deploy to.**\n\n![python flask server - configure the region](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098438038.png)\n\n**6. Utilizing the Google Cloud integration, configure Cloud Run via merge request.**\n\n![python flask server - deployments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098438041.png)\n\n**7. This will open a merge request. Immediately merge this merge request.**\n\n![python flask server - enable deployments to Cloud Run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098438043.png)\n\n**Note:** `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps.\n\n![python flask server - variables](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098438044.png)\n\n**8. Voila! Check your pipeline and you will see you have successfully deployed to Google Cloud Run utilizing GitLab CI.**\n\n![python flask server - update dockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098438045.png)\n\n![python flask server - dockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098438046.png)\n\n**9. Click the Service URL to view your newly deployed Flask server.**\n\nNavigate to **Operate > Environments** to see a list of deployments for your environments.\n\n![python flask server - deployments list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098438047.png)\n\nBy clicking on the environment called **main**, you’ll be able to view a complete list of deployments specific to that environment.\n\n![python flask server - main job listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098438048.png)\n\n## Next steps\n\nTo get started with developing your Flask application, try adding another endpoint. For instance, in your `main.py` file, you can add a **/bye** endpoint as shown below:\n\n```python\n@app.route(\"/bye\")\ndef bye():\n    \"\"\"Example goodbye route.\"\"\"\n    return \"Have a great day! See you!\"\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once it’s complete, go back to the Service URL and navigate to the **/bye** endpoint to see the new functionality in action.\n\n## Clean up\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. For detailed instructions, refer to the [cleanup guide](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> For more DevSecOps capabilities, [start a free 60-day trial of GitLab Ultimate and GitLab Duo](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com/blog/%2F).",[726,873,728,750],"cloud native",{"slug":875,"featured":92,"template":678},"fast-python-flask-server-deployment-with-gitlab-google-cloud","content:en-us:blog:fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","Fast Python Flask Server Deployment With Gitlab Google Cloud","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"_path":881,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":882,"content":888,"config":895,"_id":897,"_type":16,"title":898,"_source":17,"_file":899,"_stem":900,"_extension":20},"/en-us/blog/guide-to-rest-api",{"title":883,"description":884,"ogTitle":883,"ogDescription":884,"noIndex":6,"ogImage":885,"ogUrl":886,"ogSiteName":692,"ogType":693,"canonicalUrls":886,"schema":887},"Guide to REST API","Learn what a REST API is, how it works, and what its benefits are in software development. 
Also find out the underlying principles of this important technology.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098516/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_2N8JxZDeeDLlzrsJ4boteB_1750098516673.png","https://about.gitlab.com/blog/guide-to-rest-api","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Guide to REST API\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2024-10-18\",\n      }",{"title":883,"description":884,"authors":889,"heroImage":885,"date":891,"body":892,"category":14,"tags":893},[890],"GitLab","2024-10-18","Whether it's developing an online booking app, a mobile payment solution, or a messaging service, chances are your team is using a REST API. In this article, you'll learn what a REST API is and how it works, as well as its benefits and uses.\n\n## What is a REST API?\n\nREST API, RESTful API, or RESTful web API: These names designate APIs that adhere to a particular standard, which is the REST architecture. Before going any further, remember that an API, or application programming interface, is software that allows two applications to communicate with each other. In computing, APIs are essential to allow various applications to work together.\n\nTo create an API, developers follow strictly defined methods and principles so that all the pieces work together. Before the 2000s, developers used [SOAP](https://www.techtarget.com/searchapparchitecture/definition/SOAP-Simple-Object-Access-Protocol) (Simple Object Access Protocol), a protocol built on XML (Extensible Markup Language), which was complex to coordinate and resource-intensive. While SOAP is still used today, it has been largely replaced by REST APIs.\n\nDesigned in 2000 by American computer scientist Roy Fielding in his doctoral dissertation, REST (REpresentational State Transfer) has become the dominant model for creating APIs, and an essential milestone in the development of the World Wide Web. Today, the vast majority of APIs are based on REST, particularly to offer web, interactive, or mobile services. Let's find out how RESTful APIs work, their advantages, and their wide-ranging applications.\n\n## How does a REST API work?\n\nIn practice, a REST API works on the principle of a client-server environment. The RESTful API retrieves and transmits a user's or application's requests on one end and the information rendered by the server (application or database) on the other end.\n\nSome key concepts make it possible to understand how a RESTful API works. The client is the entity making a request. For example, a user searching within a product catalog in their browser is acting as a client. The API is responsible for communicating the request to the server, and returning the requested information to the client. The information that passes through the API is known as resources. The server processes requests. In this case, it will return the list of products matching the search criteria.\n\nThe client's requests are made over HTTP (Hypertext Transfer Protocol). 
Here are the main methods and the tasks they enable you to accomplish (illustrated in the sketch after this list):\n- GET: retrieve data sent by the server.\n- POST: send and publish information to the server (registration form data, for example).\n- PUT: update or replace information on the server.\n- PATCH: partially modify an existing resource.\n- DELETE: delete information from the server.
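\n\nTo make these methods concrete, here is a minimal sketch of a client exercising each one with Python's `requests` library (the endpoint and payloads are hypothetical; any RESTful service follows the same pattern):\n\n```python\nimport requests\n\nBASE = \"https://api.example.com\"  # hypothetical REST service\n\n# GET: retrieve resources matching a search\nproducts = requests.get(f\"{BASE}/products\", params={\"q\": \"laptop\"}).json()\n\n# POST: create a new resource\nresp = requests.post(f\"{BASE}/products\", json={\"name\": \"Laptop\", \"price\": 999})\nproduct_id = resp.json()[\"id\"]\n\n# PUT: replace the resource entirely\nrequests.put(f\"{BASE}/products/{product_id}\", json={\"name\": \"Laptop\", \"price\": 899})\n\n# PATCH: modify a single field\nrequests.patch(f\"{BASE}/products/{product_id}\", json={\"price\": 849})\n\n# DELETE: remove the resource\nrequests.delete(f\"{BASE}/products/{product_id}\")\n```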
\n\nThere are various data formats for using a REST API. The JSON (JavaScript Object Notation) format is a lightweight format, which is easy to understand and usable by many programming languages. XML makes it possible to manage complex data structures and is compatible with other standards such as RSS. YAML and HTML are other formats often used to communicate resources.\n\n## What are the principles of the REST API?\n\nA REST API follows the REST principles regarding software architecture. These principles create a guideline for creating flexible and lightweight APIs, which are perfectly adapted to data transmission over the internet.\n\nHere are the six architectural principles that govern a REST interface:\n- Client-server decoupling. The client only knows the URI (Uniform Resource Identifier) of the resource to be retrieved. The server interacts only by transmitting its data via HTTP.\n- Uniform interface. The REST architecture standardizes how information is identified, managed, and transmitted, and uses hyperlinks to bring additional resources to the client.\n- Code on demand. The server can transmit code to the client to expand its functionality, such as to help identify errors in a form.\n- Layered system. A RESTful API can run on several servers organized hierarchically, to provide a more stable and efficient service to the client.\n- Cacheable. The REST server can cache data to better serve the client, for example by storing the images of a site to then serve them again.\n- Stateless. Each client request is stand-alone and processed independently by the server. Therefore, each request must contain all the elements necessary for its processing.\n\n## What are the benefits of a REST API?\n\nBy following the REST API framework requirements, developers make use of the many advantages of the RESTful API to develop effective and powerful applications:\n- Versatility: There are no restrictions on which programming language to use, and there is a wide selection of data formats (XML, YAML, JSON, HTML, etc.).\n- Lightweight: The lightweight data formats of a REST API make it ideal for mobile applications or the Internet of Things (IoT).\n- Portability: Client-server separation enables the exchange of data between platforms.\n- Flexibility: This API does not have the complexities of a protocol since it is an architectural style.\n- Independence: Developers can work separately on the client or server part.\n\nThe benefits of the REST API translate into increased productivity and scalability for development teams. Scaling systems built on REST APIs is easier, so features are better able to support a large load of users and operations.\n\n## Security constraints\n\nCreating and managing a RESTful web API is not without challenges. User authentication can become complex when it uses several different methods, by HTTP, API keys, or OAuth (Open Authorization). On large and complex applications, the multiplication of endpoints between the server and the client can impair overall consistency, as can updates if they leave old touchpoints still active.\n\nAdditionally, the REST interface has a weakness because it transmits potentially sensitive data, such as identifiers, through the endpoint URL. Securing it requires specific measures such as Transport Layer Security (TLS) encryption, a robust user authentication model, and a system for managing malicious requests and limiting throughput.\n\n## Uses of a REST API\n\nDevelopers use APIs with the REST architecture to create and maintain many services. Therefore, most web and mobile applications use REST APIs to access and share resources and information. In the cloud, this API makes it possible to connect the services of distributed and hybrid architectures quickly. Within large companies, it enables interoperability between information system components.\n\nFrom refreshing an e-commerce site's prices to automating publications and orchestrating Kubernetes clusters, the scope of RESTful APIs is limited only by the imagination of digital application developers and creators.\n\n## The GitLab REST API\n\nGitLab offers a comprehensive suite of tools and APIs for integrating and automating external applications. It includes GraphQL, webhooks, IDE extensions, and of course, a REST API. The GitLab REST API can be authenticated in many ways, such as by access token, OAuth, or session cookies. Endpoints are available for Dockerfile, .gitignore, GitLab CI/CD YAML, and open source templates. To take full advantage of all the possibilities for developing your agile and cloud-native applications, see the complete [GitLab REST API documentation](https://docs.gitlab.com/ee/api/rest/index.html).\n\n## REST API FAQs\n\n### REST vs. SOAP\n\nREST and SOAP are two API standards. REST (REpresentational State Transfer) API uses the REST architectural principles, which allow a server and a client to communicate in a lightweight and scalable way. The REST API is the most common type of API. The SOAP (Simple Object Access Protocol) protocol is older, more rigid, and only available in XML format. This old standard can still be used for applications that require a high level of security.\n\n### What is the difference between REST and REST API?\n\nREST is a style of software architecture intended to facilitate the creation of web services and the exchange of data over the internet, by ensuring interoperability between computers and servers. The RESTful web API is a type of API that is based on the main principles of REST.\n\n### What are the principles of a REST API?\n\nA REST API follows the six main principles of the REST architecture. These principles are uniform interface, code on demand, layered system, cacheable, stateless, and client-server decoupling. 
The latter principle forms the basis of the structure of a RESTful API; it is essential to the success of this API in the world of web applications.\n\n## Learn more\n- [GitLab REST API documentation](https://docs.gitlab.com/ee/api/rest/)\n- [Extend with GitLab](https://docs.gitlab.com/ee/api/)",[232,894],"DevOps",{"slug":896,"featured":6,"template":678},"guide-to-rest-api","content:en-us:blog:guide-to-rest-api.yml","Guide To Rest Api","en-us/blog/guide-to-rest-api.yml","en-us/blog/guide-to-rest-api",{"_path":902,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":903,"content":909,"config":917,"_id":919,"_type":16,"title":920,"_source":17,"_file":921,"_stem":922,"_extension":20},"/en-us/blog/how-we-designed-the-gitlab-reference-architectures",{"title":904,"description":905,"ogTitle":904,"ogDescription":905,"noIndex":6,"ogImage":906,"ogUrl":907,"ogSiteName":692,"ogType":693,"canonicalUrls":907,"schema":908},"How we designed the GitLab Reference Architectures","Take a look back with us as we dive into our Reference Architectures design journey to help users easily deploy GitLab at scale. Learn our goals, process, and what's happened in the five years since.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098651/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%282%29_52vS9ne2Hu3TElOeHep0AF_1750098651525.png","https://about.gitlab.com/blog/how-we-designed-the-gitlab-reference-architectures","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we designed the GitLab Reference Architectures\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grant Young\"}],\n        \"datePublished\": \"2024-10-02\",\n      }",{"title":904,"description":905,"authors":910,"heroImage":906,"date":912,"body":913,"category":14,"tags":914},[911],"Grant Young","2024-10-02","We introduced the first [GitLab Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures) five years ago. Originally developed as a partnership between the GitLab Test Platform (formerly Quality Engineering) and Support teams, along with other contributors, these architectures aim to provide scalable and elastic starting points to deploy GitLab at scale, tailored to an organization's target load.\n\nSince their debut, we've been thrilled to see the impact these architectures have had on our customers as they navigate their DevSecOps journey. We continue to iterate, expand, and refine the architectures, reflecting our commitment to providing you with the latest, best-in-class guidance on deploying, scaling, and maintaining your GitLab environments.\n\nIn recognition of the five-year milestone, here is a peek behind the curtain on _how_ we designed the Reference Architectures and how that design still applies today.\n\n## The problem\n\nBefore introducing the Reference Architectures, we frequently heard from our customers about the hurdles they faced when deploying GitLab at scale to meet their performance and availability goals.\n\nWhile every GitLab environment can be considered a little unique because of the need to meet a customer's own requirements, we recognized from running GitLab.com, as well as from our larger customers, that there were common fundamentals to deploying GitLab at scale that were worth sharing. 
Our objective was to address customer needs while promoting deployment best practices to reduce drift and increase alignment.\n\nSimultaneously, we wanted to significantly expand our performance testing efforts. The goals of this expansion were to provide our engineering teams with a deeper understanding of performance bottlenecks, to drive improvements in GitLab's performance, and to continuously test the application moving forward to ensure it remained performant. However, to conduct meaningful performance tests, we needed a standardized GitLab environment design capable of handling the target loads.\n\nEnter the Reference Architectures.\n\n## The goals\n\nWith the need for a common architecture clear, we next set the goals of this initiative, which ultimately became the following:\n\n- Performance: Ensure the architecture can handle the target load efficiently.\n- Availability: Maximize uptime and reliability wherever possible.\n- Scalability and elasticity: Ensure the architecture is scalable and elastic to meet individual customer needs.\n- Cost-effectiveness: Optimize resource allocation to avoid unnecessary expenses.\n- Maintainability: Make the architecture deployment and management as straightforward as possible with standardized configurations.\n\nIt's crucial to note that these goals were not listed in any particular order, and they are goals we stay true to today.\n\n## The process\n\nOnce the goals were set, we faced the challenge of designing an architecture, validating it, and making sure that it was fit for purpose and met those goals.\n\nThe process itself was relatively simple in design:\n\n- Gather metrics on existing environments and the loads they were able to handle.\n- Define a prototype architecture based on these metrics.\n- Build and test the environment to validate.\n- Adjust the environment iteratively based on the test results and metrics until we had a validated architecture that met the goals.\n\nWhile simple in design, the process was of course messier in practice, so we got to work.\n\nFirst, we collected and reviewed the data. To that end, we reviewed metrics and logging data from GitLab.com as well as several participating large customers to correlate the environment sizes deployed to the load they were handling. To achieve this, we needed an objective and quantifiable way to measure that load across any environment, and for that we used **Requests per Second (RPS)**. With RPS, we could see the concurrent load each environment handled and correlate it to the user count. Specifically, a user count would correlate to the full manual and automated load (such as continuous integration). From that data, we were able to compare several environment sizes and start to pick out common patterns for the architectures.\n\nNext, we started with a prototype architecture that aimed to meet the goals while cross-referencing with the data we collected. In fact, we started this step in conjunction with the first one, as we had a good idea of where to start: taking the fundamental GitLab.com design and scaling it down for individual customer loads in cost-effective ways. This allowed us to start performance testing the prototype against the data we were analyzing. After quite a few iterations, we had a starting point for our prototype architecture.
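\n\nAs a rough, illustrative sketch of the RPS-to-user-count correlation mentioned above (extrapolating from the validated 200 RPS / 10,000-user architecture discussed below, which works out to roughly 1 RPS per 50 users), a target load can be approximated as:\n\n```shell\n# Illustrative arithmetic only; actual sizing depends on your workload.\n# The 10,000-user architecture is validated at 200 RPS, i.e. ~1 RPS per 50 users.\nusers=5000\necho \"Approximate target load: $((users / 50)) RPS\"  # prints 100\n```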
\n\nTo thoroughly test and validate the architecture, we needed to turn to performance testing and define our methodology. The approach was to target our most common endpoints with a representative test data set at RPS loads that were also representative. Then, although we had manually built the prototype architecture, we knew we needed tooling to automatically build environments and handle tasks such as updates. These efforts resulted in the [GitLab Performance Tool](https://about.gitlab.com/blog/how-were-building-up-performance-testing-of-gitlab/) and [GitLab Environment Toolkit](https://about.gitlab.com/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale/), which I blogged about previously and which we continue to use to this day (and you can use too!).\n\nWith all the above in place, we started the main work of validating the prototype architecture through multiple cycles of testing and iterating. In each cycle, we would performance test the environment, review the results and metrics, and adjust the environment accordingly. Through iteration, we were able to identify which failures were real application performance issues and which were environmental, and eventually we had our first architecture. That architecture is now known as the [200 RPS or 10,000-user Reference Architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html).\n\n![GitLab Reference Architecture - 200 RPS](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098658/Blog/Content%20Images/Blog/Content%20Images/reference_architecture_aHR0cHM6_1750098658326.png)\n\n## Where Reference Architectures are today\n\nSince publishing our first validated Reference Architecture, the work has never stopped! We like to describe the architectures as living documentation, as they're constantly being improved and expanded with additions such as:\n\n- various Reference Architecture sizes based on common deployments\n- non-highly available sizes for smaller environments\n- full step-by-step documentation in collaboration with our colleagues in Technical Writing and Support\n- expanded guidance and a new naming scheme to help with right sizing, scaling, and how to deal with outliers such as monorepos\n- cloud native hybrid variants where select components are run in Kubernetes\n- recommendations and guidance for cloud provider services\n- and more! Check out the [update history](https://docs.gitlab.com/ee/administration/reference_architectures/#update-history) section in the Reference Architecture documentation!\n\nAll this is driven by our [comprehensive testing program](https://docs.gitlab.com/ee/administration/reference_architectures/#validation-and-test-results) that we built alongside the Reference Architectures to continuously test that they remain fit for purpose against the latest GitLab code _every single week_ and to catch any unexpected performance issues early.\n\nAnd we're thrilled to see these efforts have helped numerous customers to date, as well as our own engineering teams, deliver new, exciting services. In fact, our engineering teams used the Reference Architectures to develop [GitLab Dedicated](https://about.gitlab.com/dedicated/). Five years on, our commitment is stronger than ever. 
The work very much continues in the same way it started to ensure you have the best-in-class guidance for your DevSecOps journey.\n\n> Learn more about [GitLab Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures/).\n",[832,725,704,915,916],"inside GitLab","customers",{"slug":918,"featured":92,"template":678},"how-we-designed-the-gitlab-reference-architectures","content:en-us:blog:how-we-designed-the-gitlab-reference-architectures.yml","How We Designed The Gitlab Reference Architectures","en-us/blog/how-we-designed-the-gitlab-reference-architectures.yml","en-us/blog/how-we-designed-the-gitlab-reference-architectures",{"_path":924,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":925,"content":931,"config":938,"_id":940,"_type":16,"title":941,"_source":17,"_file":942,"_stem":943,"_extension":20},"/en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments",{"title":926,"description":927,"ogTitle":926,"ogDescription":927,"noIndex":6,"ogImage":928,"ogUrl":929,"ogSiteName":692,"ogType":693,"canonicalUrls":929,"schema":930},"Using child pipelines to continuously deploy to five environments","Learn how to manage continuous deployment to multiple environments, including temporary, on-the-fly sandboxes, with a minimalist GitLab workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097012/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750097011626.jpg","https://about.gitlab.com/blog/using-child-pipelines-to-continuously-deploy-to-five-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using child pipelines to continuously deploy to five environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Olivier Dupré\"}],\n        \"datePublished\": \"2024-09-26\",\n      }",{"title":926,"description":927,"authors":932,"heroImage":928,"date":934,"body":935,"category":14,"tags":936},[933],"Olivier Dupré","2024-09-26","DevSecOps teams sometimes require the ability to manage continuous deployment across multiple environments — and they need to do so without changing their workflows. The [GitLab DevSecOps platform](https://about.gitlab.com/) supports this need, including temporary, on-the-fly sandboxes, with a minimalist approach. In this article, you'll learn how to run continuous deployment of infrastructure using Terraform, over multiple environments.\n\nThis strategy can easily be applied to any project, whether it is infrastructure as code (IaC) relying on another technology, such as [Pulumi](https://www.pulumi.com/) or [Ansible](https://www.ansible.com/), source code in any language, or a monorepo that mixes many languages.\n\nThe final pipeline that you will have at the end of this tutorial will deploy:\n\n* A temporary **review** environment for each feature branch.\n* An **integration** environment, easy to wipe out and deployed from the main branch.\n* A **QA** environment, also deployed from the main branch, to run quality assurance steps.\n* A **staging** environment, deployed for every tag. This is the last round before production.\n* A **production** environment, just after the staging environment. 
This one is triggered manually for demonstration, but can also be continuously deployed.\n\n> Here is the legend for the flow charts in this article:\n> * Round boxes are the GitLab branches.\n> * Square boxes are the environments.\n> * Text on the arrows describes the actions that take you from one box to the next.\n> * Angled squares are decision steps.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\n    D -->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\n    D -->|tag| G(X.Y.Z)\n    F -->|validate| G\n\n    G -->|auto deploy| H[staging]\n    H -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\nAt each step, you'll learn the [why](#why) and the [what](#what) before moving to the [how](#how). This will help you fully understand and replicate this tutorial.\n\n## Why\n\n* [Continuous integration](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-integration-ci) is almost a de facto standard. Most companies have implemented CI pipelines or are willing to standardize their practice.\n\n* [Continuous delivery](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd), which pushes artifacts to a repository or registry at the end of the CI pipeline, is also popular.\n\n* Continuous deployment, which goes further and deploys these artifacts automatically, is less widespread. Where it has been implemented, we see it mostly on the application side. When it comes to continuously deploying infrastructure, the picture is less clear and revolves more around managing several environments; testing, securing, and verifying the infrastructure's code seems more challenging. This is one of the fields where DevOps has not yet reached maturity. Another is shifting security left: integrating security teams and, more importantly, security concerns earlier in the delivery lifecycle to upgrade from DevOps to ***DevSecOps***.\n\nGiven this high-level picture, in this tutorial, you will work toward a simple, yet efficient way to implement DevSecOps for your infrastructure through the example of deploying resources to five environments, gradually progressing from development to production.\n\n__Note:__ Even if I advocate embracing a FinOps approach and reducing the number of environments, sometimes there are excellent reasons to maintain more than just dev, staging, and production. So, please, adapt the examples below to match your needs.\n\n## What\n\nThe rise of cloud technology has driven the usage of IaC. Ansible and Terraform were among the first to pave the road here. OpenTofu, Pulumi, AWS CDK, Google Cloud Deployment Manager, and many others joined the party.\n\nDefining your infrastructure as code is a great way to gain confidence when deploying it. You can test it, deploy it, and replay it again and again until you reach your goal.\n\nUnfortunately, we often see companies maintain a separate branch, or even repository, for each of their target environments. And this is where the problems start. They are no longer enforcing a process. They are no longer ensuring that any change in the production code base has been accurately tested in previous environments. 
And they start seeing drift from one environment to the next.\n\nI realized this tutorial was necessary when, at a conference I attended, every participant said they did not have a workflow enforcing that infrastructure be thoroughly tested before being deployed to production. And they all agreed that sometimes they patch the code directly in production. Sure, this is fast, but is it safe? How do you propagate those changes back to previous environments? How do you ensure there are no side effects? How do you control whether you are putting your company at risk with new vulnerabilities being pushed too quickly to production?\n\nThe question of *why* DevOps teams deploy directly to production is critical here. Is it because the pipeline is not efficient or fast enough? Is there no automation? Or, even worse, because there is *no way to test accurately outside of production*?\n\nIn the next section, you will learn how to implement automation for your infrastructure and ensure that your DevOps team can effectively test changes before pushing them to any environment that impacts others. You will see how your code is secured and its deployment is controlled, end to end.\n\n## How\n\nAs mentioned earlier, there are many IaC languages out there nowadays, and we cannot realistically cover *all* of them in a single article. So, I will rely on basic Terraform code running on version 1.4. Please do not focus on the IaC language itself but instead on the process that you could apply to your own ecosystem.\n\n### The Terraform code\n\nLet's start with some fundamental Terraform code.\n\nWe are going to deploy a virtual private cloud (VPC), which is a virtual network, to AWS. In that VPC, we will deploy a public and a private subnet. As their name implies, they are subnets of the main VPC. Finally, we will add an Elastic Compute Cloud (EC2) instance (a virtual machine) in the public subnet.\n\nThis demonstrates the deployment of four resources without adding too much complexity. 
The idea is to focus on the pipeline, not the code.\n\nHere is the target layout we want to reach for your repository.\n\n![target for repository](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097033415.png)\n\nLet's do it step by step.\n\nFirst, we declare all resources in a `terraform/main.tf` file:\n\n```terraform\nprovider \"aws\" {\n  region = var.aws_default_region\n}\n\nresource \"aws_vpc\" \"main\" {\n  cidr_block = var.aws_vpc_cidr\n\n  tags = {\n    Name = var.aws_resources_name\n  }\n}\n\nresource \"aws_subnet\" \"public_subnet\" {\n  vpc_id     = aws_vpc.main.id\n  cidr_block = var.aws_public_subnet_cidr\n\n  tags = {\n    Name = \"Public Subnet\"\n  }\n}\n\nresource \"aws_subnet\" \"private_subnet\" {\n  vpc_id     = aws_vpc.main.id\n  cidr_block = var.aws_private_subnet_cidr\n\n  tags = {\n    Name = \"Private Subnet\"\n  }\n}\n\nresource \"aws_instance\" \"sandbox\" {\n  ami           = var.aws_ami_id\n  instance_type = var.aws_instance_type\n\n  subnet_id = aws_subnet.public_subnet.id\n\n  tags = {\n    Name = var.aws_resources_name\n  }\n}\n```\n\nAs you can see, this code needs several variables, so let's declare them in a `terraform/variables.tf` file:\n\n```terraform\nvariable \"aws_ami_id\" {\n  description = \"The AMI ID of the image being deployed.\"\n  type        = string\n}\n\nvariable \"aws_instance_type\" {\n  description = \"The instance type of the VM being deployed.\"\n  type        = string\n  default     = \"t2.micro\"\n}\n\nvariable \"aws_vpc_cidr\" {\n  description = \"The CIDR of the VPC.\"\n  type        = string\n  default     = \"10.0.0.0/16\"\n}\n\nvariable \"aws_public_subnet_cidr\" {\n  description = \"The CIDR of the public subnet.\"\n  type        = string\n  default     = \"10.0.1.0/24\"\n}\n\nvariable \"aws_private_subnet_cidr\" {\n  description = \"The CIDR of the private subnet.\"\n  type        = string\n  default     = \"10.0.2.0/24\"\n}\n\nvariable \"aws_default_region\" {\n  description = \"Default region where resources are deployed.\"\n  type        = string\n  default     = \"eu-west-3\"\n}\n\nvariable \"aws_resources_name\" {\n  description = \"Default name for the resources.\"\n  type        = string\n  default     = \"demo\"\n}\n```\n\nWith that, we are almost good to go on the IaC side. What's missing is a way to share the Terraform states. For those who don't know, Terraform schematically works as follows:\n\n* `plan` checks the differences between the current state of the infrastructure and what is defined in the code. Then, it outputs the differences.\n* `apply` applies the differences in the `plan` and updates the state.\n\nOn the first run, the state is empty; it is then filled with the details (IDs, etc.) of the resources applied by Terraform.\n\nThe problem is: Where is that state stored? How do we share it so several developers can collaborate on the code?\n\nThe solution is fairly simple: Leverage GitLab to store and share the state for you through a [Terraform HTTP backend](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html).\n\nThe first step in using this backend is to create the simplest possible `terraform/backend.tf` file. The second step will be handled in the pipeline.\n\n```terraform\nterraform {\n  backend \"http\" {\n  }\n}\n```\n\nEt voilà! We have bare-minimum Terraform code to deploy these four resources. We will provide the variable values at runtime, so let's handle that later.
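\n\nThat second step boils down to pointing `terraform init` at GitLab's Terraform state API. As a rough sketch of what a CI job does under the hood (the GitLab Terraform template takes care of this for you; the exact flags here are illustrative and rely on GitLab's predefined CI/CD variables):\n\n```shell\n# Initialize the HTTP backend against GitLab's Terraform state API,\n# then plan to a file and apply that exact plan.\nterraform init \\\n  -backend-config=\"address=${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${TF_STATE_NAME}\" \\\n  -backend-config=\"lock_address=${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${TF_STATE_NAME}/lock\" \\\n  -backend-config=\"unlock_address=${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${TF_STATE_NAME}/lock\" \\\n  -backend-config=\"username=gitlab-ci-token\" \\\n  -backend-config=\"password=${CI_JOB_TOKEN}\" \\\n  -backend-config=\"lock_method=POST\" \\\n  -backend-config=\"unlock_method=DELETE\"\nterraform plan -out=plan.cache   # diff the current state against the code\nterraform apply plan.cache       # apply that plan and update the shared state\n```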
\n\n### The workflow\n\nThe workflow that we are going to implement now is the following:\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\n    D -->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\n    D -->|tag| G(X.Y.Z)\n    F -->|validate| G\n\n    G -->|auto deploy| H[staging]\n    H -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n1. Create a **feature** branch. This will continuously run all scanners on the code to ensure that it is still compliant and secure. This code will be continuously deployed to a temporary environment, `review/feature_branch`, named after the current branch. This is a safe environment where the developers and operations teams can test their code without impacting anybody. This is also where we will enforce the process, such as requiring code reviews and running scanners, to ensure that the quality and security of the code are acceptable and do not put your assets at risk. The infrastructure deployed by this branch is automatically destroyed when the branch is closed. This helps you keep your budget under control.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\u003C/pre>\n\n2. Once approved, the feature branch will be **merged** into the main branch. This is a [protected branch](https://docs.gitlab.com/ee/user/project/protected_branches.html) where no one can push directly. This is mandatory to ensure that every change headed to production is thoroughly tested. That branch is also continuously deployed. The target here is the `integration` environment. To keep this environment slightly more stable, its deletion is not automated but can be triggered manually.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    D(main) -->|auto deploy| E[integration]\n\u003C/pre>\n\n3. From there, manual approval is required to trigger the next deployment. This will deploy the main branch to the `qa` environment. Here, I have set a rule to prevent deletion from the pipeline. The idea is that this environment should be quite stable (after all, it's already the third environment), and I would like to prevent accidental deletion. Feel free to adapt the rules to match your processes.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    D(main)-->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\u003C/pre>\n\n4. To proceed, we will need to **tag** the code. We are relying on [protected tags](https://docs.gitlab.com/ee/user/project/protected_tags.html) here to ensure that only a specific set of users is allowed to deploy to these last two environments. This will immediately trigger a deployment to the `staging` environment.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    D(main) -->|tag| G(X.Y.Z)\n    F[qa] -->|validate| G\n\n    G -->|auto deploy| H[staging]\n\u003C/pre>\n\n5. Finally, we land in `production`. When discussing infrastructure, it is often challenging to deploy progressively (10%, 25%, etc.), so we will deploy the whole infrastructure. Still, we control that deployment with a manual trigger of this last step. 
And to enforce maximum control on this highly critical environment, we will manage it as a [protected environment](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    H[staging] -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n### The pipeline\n\nTo implement the above [workflow](#the-workflow), we are now going to build a pipeline with two [downstream pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html).\n\n#### The main pipeline\n\nLet's start with the main pipeline. This is the one that will be triggered automatically on any **push to a feature branch**, any **merge to the default branch**, or any **tag**. *The one* that will do true **continuous deployment** to the following environments: `review`, `integration`, and `staging`. And it is declared in the `.gitlab-ci.yml` file at the root of your project.\n\n![the repository target](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097033417.png)\n\n```yml\nstages:\n  - test\n  - environments\n\n.environment:\n  stage: environments\n  variables:\n    TF_ROOT: terraform\n    TF_CLI_ARGS_plan: \"-var-file=../vars/$variables_file.tfvars\"\n  trigger:\n    include: .gitlab-ci/.first-layer.gitlab-ci.yml\n    strategy: depend            # Wait for the triggered pipeline to successfully complete\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nreview:\n  extends: .environment\n  variables:\n    environment: review/$CI_COMMIT_REF_SLUG\n    TF_STATE_NAME: $CI_COMMIT_REF_SLUG\n    variables_file: review\n    TF_VAR_aws_resources_name: $CI_COMMIT_REF_SLUG  # Used in the tag Name of the resources deployed, to easily differentiate them\n  rules:\n    - if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n\nintegration:\n  extends: .environment\n  variables:\n    environment: integration\n    TF_STATE_NAME: $environment\n    variables_file: $environment\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nstaging:\n  extends: .environment\n  variables:\n    environment: staging\n    TF_STATE_NAME: $environment\n    variables_file: $environment\n  rules:\n    - if: $CI_COMMIT_TAG\n\n#### TWEAK\n# This tweak is needed to display vulnerability results in the merge widgets.\n# As soon as this issue https://gitlab.com/gitlab-org/gitlab/-/issues/439700 is resolved, the `include` instruction below can be removed.\n# Until then, the SAST IaC scanners will run in the downstream pipelines, but their results will not be available directly in the merge request widget, making it harder to track them.\n# Note: This workaround is perfectly safe and will not slow down your pipeline.\ninclude:\n  - template: Security/SAST-IaC.gitlab-ci.yml\n#### END TWEAK\n\n```\n\nThis pipeline runs only two stages: `test` and `environments`. The former is needed for the *TWEAK* to run scanners. 
The latter triggers a child pipeline with a different set of variables for each case defined above (push to a feature branch, merge to the default branch, or tag).\n\nHere, we add a dependency on our child pipeline with the keyword [strategy:depend](https://docs.gitlab.com/ee/ci/yaml/index.html#triggerstrategy) so that the pipeline view in GitLab is updated only once the deployment is finished.\n\nAs you can see, we are defining a base job, [hidden](https://docs.gitlab.com/ee/ci/jobs/#hide-jobs), and we are extending it with specific variables and rules to trigger only one deployment for each target environment.\n\nBesides the [predefined variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html), we are using two new entries that we need to define:\n1. [The variables specific](#the-variable-definitions) to each environment: `../vars/$variables_file.tfvars`\n2. [The child pipeline](#the-child-pipeline), defined in `.gitlab-ci/.first-layer.gitlab-ci.yml`\n\nLet's start with the smallest part, the variable definitions.\n\n#### The variable definitions\n\nHere, we are going to mix two ways of providing variables to Terraform:\n\n* The first uses [.tfvars files](https://developer.hashicorp.com/terraform/language/values/variables#variable-definitions-tfvars-files) for all non-sensitive input, which should be stored within GitLab.\n\n![solution one to provide variables to Terraform](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097034/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097033419.png)\n\n* The second uses [environment variables](https://developer.hashicorp.com/terraform/language/values/variables#environment-variables) with the prefix `TF_VAR`. Combined with the GitLab capacity to [mask variables](https://docs.gitlab.com/ee/ci/variables/#mask-a-cicd-variable), [protect them](https://docs.gitlab.com/ee/ci/variables/#protect-a-cicd-variable), and [scope them to environments](https://docs.gitlab.com/ee/ci/environments/index.html#limit-the-environment-scope-of-a-cicd-variable), this second way to inject variables is a powerful solution to **prevent sensitive information leakage**. (If you consider your production's private CIDR very sensitive, you could protect it like this, ensuring it is only available for the `production` environment and for pipelines running against protected branches and tags, and that its value is masked in the job's logs.)\n\n![solution two to provide variables to Terraform](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097034/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097033422.png)\n\nAdditionally, each variable file should be controlled through a [`CODEOWNERS` file](https://docs.gitlab.com/ee/user/project/codeowners/) to set who can modify each of them.\n\n```\n[Production owners]\nvars/production.tfvars @operations-group\n\n[Staging owners]\nvars/staging.tfvars @odupre @operations-group\n\n[CodeOwners owners]\nCODEOWNERS @odupre\n```\n\nThis article is not a Terraform training, so we will move quickly and simply show the `vars/review.tfvars` file here. Subsequent environment files are, of course, very similar. Just set the non-sensitive variables and their values here.\n\n```terraform\naws_vpc_cidr = \"10.1.0.0/16\"\naws_public_subnet_cidr = \"10.1.1.0/24\"\naws_private_subnet_cidr = \"10.1.2.0/24\"\n```
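\n\nAs a quick, hypothetical illustration of that second mechanism: any variable prefixed with `TF_VAR_` is picked up by Terraform as the matching input variable, with no extra wiring (the CIDR value below is made up):\n\n```shell\n# A masked, protected CI/CD variable scoped to the production environment;\n# Terraform reads TF_VAR_aws_private_subnet_cidr as var.aws_private_subnet_cidr.\nexport TF_VAR_aws_private_subnet_cidr=\"10.42.2.0/24\"\nterraform plan -var-file=../vars/production.tfvars\n```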
\n\n#### The child pipeline\n\nThis one is where the actual work is done. So, it is slightly more complex than the first one. But there is no difficulty here that we cannot overcome together!\n\nAs we have seen in the definition of the [main pipeline](#the-main-pipeline), that downstream pipeline is declared in the file `.gitlab-ci/.first-layer.gitlab-ci.yml`.\n\n![Downstream pipeline declared in file](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097033424.png)\n\nLet's break it down into small chunks. We'll see the big picture at the end.\n\n##### Run Terraform commands and secure the code\n\nFirst, we want to run a pipeline for Terraform. We, at GitLab, are open source. So, our Terraform template is open source. And you simply need to include it. This can be achieved with the following snippet:\n\n```yml\ninclude:\n  - template: Terraform.gitlab-ci.yml\n```\n\nThis template runs the Terraform formatting checks for you and validates your code, before planning and applying it. It also allows you to destroy what you have deployed.\n\nAnd, because GitLab is a single, unified DevSecOps platform, that template also automatically includes two security scanners to find potential threats in your code and warn you before you deploy it to the next environments.\n\nNow that we have checked, secured, built, and deployed our code, let's do some tricks.\n\n##### Share cache between jobs\n\nWe will cache the job results to reuse them in subsequent pipeline jobs. This is as simple as adding the following piece of code:\n\n```yml\ndefault:\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: cache-$CI_COMMIT_REF_SLUG\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n```\n\nHere, we are defining a different cache for each commit, falling back to the main branch name if needed.\n\nIf we look carefully at the template that we are using, we can see that it has some rules to control when jobs are run. We want to run all controls (both QA and security) on all branches. So, we are going to override these settings.\n\n##### Run controls on all branches\n\nGitLab templates are a powerful feature where one can override only a piece of the template. Here, we are interested only in overwriting the rules of some jobs to always run quality and security checks. Everything else defined for these jobs will stay as defined in the template.\n\n```yml\nfmt:\n  rules:\n    - when: always\n\nvalidate:\n  rules:\n    - when: always\n\nkics-iac-sast:\n  rules:\n    - when: always\n\niac-sast:\n  rules:\n    - when: always\n```\n\nNow that we have enforced the quality and security controls, we want to differentiate how the main environments (integration and staging) and the review environments in the [workflow](#the-workflow) behave. Let's start by defining the main environments' behavior, and we will then tweak this configuration for the review environments.\n\n##### CD to integration and staging\n\nAs defined earlier, we want to deploy the main branch and the tags to these two environments. We are adding rules to control that on both the `build` and `deploy` jobs. Then, we want to enable `destroy` only for `integration`, as we consider `staging` too critical to be deleted with a single click; that would be error-prone, and we don't want it.
\n\nFinally, we are linking the `deploy` job to the `destroy` one, so we can `stop` the environment directly from the GitLab GUI.\n\nThe `GIT_STRATEGY` setting prevents the runner from retrieving the code from the source branch when destroying. That retrieval would fail if the branch had been deleted manually, so we rely on the cache to get everything we need to run the Terraform instructions.\n\n```yml\nbuild:  # terraform plan\n  environment:\n    name: $TF_STATE_NAME\n    action: prepare\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndeploy: # terraform apply --> automatically deploy on corresponding env (integration or staging) when merging to default branch or tagging. Second layer environments (qa and production) will be controlled manually\n  environment: \n    name: $TF_STATE_NAME\n    action: start\n    on_stop: destroy\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndestroy:\n  extends: .terraform:destroy\n  variables:\n    GIT_STRATEGY: none\n  dependencies:\n    - build\n  environment:\n    name: $TF_STATE_NAME\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $TF_DESTROY == \"true\" # Manually destroy integration env.\n      when: manual\n```\n\nAs noted, this matches the need to deploy to `integration` and `staging`. But we are still missing a temporary environment where developers can experiment with and validate their code without impacting others. This is where the deployment to the `review` environment takes place.\n\n##### CD to review environments\n\nDeploying to a review environment is not much different from deploying to `integration` and `staging`. So we will once again leverage GitLab's capacity to overwrite only pieces of a job definition.\n\nFirst, we set rules to run these jobs only on feature branches.\n\nThen, we link the `deploy_review` job to `destroy_review`. This will allow us to stop the environment **manually** from the GitLab user interface, but more importantly, it will **automatically trigger the environment destruction** when the feature branch is closed. This is a good FinOps practice to help you control your operational expenditures.\n\nSince Terraform needs a plan file to destroy an infrastructure, exactly as it needs one to build it, we add a dependency from `destroy_review` on `build_review` to retrieve its artifacts.\n\nFinally, we see here that the environment's name is set to `$environment`. 
It has been set in the [main pipeline](#the-main-pipeline) to `review/$CI_COMMIT_REF_SLUG`, and forwarded to this child pipeline with the instruction `trigger:forward:yaml_variables:true`.\n\n```yml\nbuild_review:\n  extends: build\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndeploy_review:\n  extends: deploy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: start\n    on_stop: destroy_review\n    # url: https://$CI_ENVIRONMENT_SLUG.example.com\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndestroy_review:\n  extends: destroy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH   # Do not destroy staging\n      when: never\n    - when: manual\n```\n\nSo, to recap, we now have a pipeline that can:\n\n* Deploy temporary review environments, which are automatically cleaned up when the feature branch is closed\n* Continuously deploy the **default branch** to `integration`\n* Continuously deploy the **tags** to `staging`\n\nLet's now add an extra layer, where we will deploy, based on a manual trigger this time, to the `qa` and `production` environments.\n\n##### Continuously deploy to QA and production\n\nBecause not everybody is willing to deploy continuously to production, we will add a manual validation to the next two deployments. From a purely **CD** perspective, we would not add this trigger, but take this as an opportunity to learn how to run jobs from other triggers.\n\nSo far, we have started a [child pipeline](#the-child-pipeline) from the [main pipeline](#the-main-pipeline) to run all deployments.\n\nSince we want to run other deployments from the default branch and the tags, we will add another layer dedicated to these additional steps. Nothing new here. We will just repeat the same process we used for the [main pipeline](#the-main-pipeline). Going this way allows you to manipulate as many layers as you need. I have already seen up to nine environments in some places.\n\nWithout rearguing the benefits of having fewer environments, the process that we are using here makes it very easy to implement the same pipeline all the way from early stages to final delivery, while keeping your pipeline definition simple and split into small chunks that you can maintain at almost no cost.\n\nTo prevent variable conflicts here, we are just using new variable names to identify the Terraform state and input file.\n\n```yml\n.2nd_layer:\n  stage: 2nd_layer\n  variables:\n    TF_ROOT: terraform\n  trigger:\n    include: .gitlab-ci/.second-layer.gitlab-ci.yml\n    # strategy: depend            # Do NOT wait for the downstream pipeline to finish to mark the upstream pipeline as successful. 
Otherwise, all pipelines will fail when reaching the pipeline timeout before deployment to the 2nd layer.\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nqa:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: qa\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nproduction:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: production\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\n**One important trick here is the strategy used for the new downstream pipeline.** We leave that `trigger:strategy` at its default value; otherwise, the [main pipeline](#the-main-pipeline) would wait for your [grand-child pipeline](#the-grand-child-pipeline) to finish. With a manual trigger, this could last for a very long time and make your pipeline dashboard harder to read and understand.\n\nYou have probably already wondered what the content of that `.gitlab-ci/.second-layer.gitlab-ci.yml` file is. We will cover that in the next section.\n\n##### The first layer complete pipeline definition\n\nIf you are looking for a complete view of this first layer (stored in `.gitlab-ci/.first-layer.gitlab-ci.yml`), here it is in full:\n\n```yml\nvariables:\n  TF_VAR_aws_ami_id: $AWS_AMI_ID\n  TF_VAR_aws_instance_type: $AWS_INSTANCE_TYPE\n  TF_VAR_aws_default_region: $AWS_DEFAULT_REGION\n\ninclude:\n  - template: Terraform.gitlab-ci.yml\n\ndefault:\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: cache-$CI_COMMIT_REF_SLUG\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n\nstages:\n  - validate\n  - test\n  - build\n  - deploy\n  - cleanup\n  - 2nd_layer       # Used to deploy a 2nd environment from both the main branch and the tags\n\nfmt:\n  rules:\n    - when: always\n\nvalidate:\n  rules:\n    - when: always\n\nkics-iac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: on_success\n\niac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: on_success\n\n###########################################################################################################\n## Integration env. and Staging env.\n##  * Auto-deploy to Integration on merge to main.\n##  * Auto-deploy to Staging on tag.\n##  * Integration can be manually destroyed if TF_DESTROY is set to true.\n##  * Destroy of next env. is not automated to prevent errors.\n###########################################################################################################\nbuild:  # terraform plan\n  environment:\n    name: $TF_STATE_NAME\n    action: prepare\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndeploy: # terraform apply --> automatically deploy on corresponding env (integration or staging) when merging to default branch or tagging. 
Second layer environments (qa and production) will be controlled manually\n  environment: \n    name: $TF_STATE_NAME\n    action: start\n    on_stop: destroy\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndestroy:\n  extends: .terraform:destroy\n  variables:\n    GIT_STRATEGY: none\n  dependencies:\n    - build\n  environment:\n    name: $TF_STATE_NAME\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $TF_DESTROY == \"true\" # Manually destroy integration env.\n      when: manual\n###########################################################################################################\n\n###########################################################################################################\n## Dev env.\n##  * Temporary environment. Lives and dies with the Merge Request.\n##  * Auto-deploy on push to feature branch.\n##  * Auto-destroy when the Merge Request is closed.\n###########################################################################################################\nbuild_review:\n  extends: build\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndeploy_review:\n  extends: deploy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: start\n    on_stop: destroy_review\n    # url: https://$CI_ENVIRONMENT_SLUG.example.com\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndestroy_review:\n  extends: destroy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH   # Do not destroy staging\n      when: never\n    - when: manual\n###########################################################################################################\n\n###########################################################################################################\n## Second layer\n##  * Deploys from main branch to qa env.\n##  * Deploys from tag to production.\n###########################################################################################################\n.2nd_layer:\n  stage: 2nd_layer\n  variables:\n    TF_ROOT: terraform\n  trigger:\n    include: .gitlab-ci/.second-layer.gitlab-ci.yml\n    # strategy: depend            # Do NOT wait for the downstream pipeline to finish to mark the upstream pipeline as successful. 
Otherwise, all pipelines will fail when reaching the pipeline timeout before deployment to the 2nd layer.\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nqa:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: qa\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nproduction:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: production\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_TAG\n###########################################################################################################\n```\n\nAt this stage, we are already deploying safely to three environments. That is my personal ideal recommendation. However, if you need more environments, add them to your CD pipeline.\n\nYou have certainly already noted that we include a downstream pipeline with the keyword `trigger:include`. This includes the file `.gitlab-ci/.second-layer.gitlab-ci.yml`. We want to run almost the same pipeline, so its content is very similar to the one we have detailed above. The main advantage of defining this [grand-child pipeline](#the-grand-child-pipeline) separately is that it lives on its own, making both variables and rules much easier to define.\n\n#### The grand-child pipeline\n\nThis second layer pipeline is a brand new pipeline. Hence, it needs to mimic the first layer definition with:\n\n* [Inclusion of the Terraform template](#run-terraform-commands-and-secure-the-code).\n* [Enforcement of security checks](#run-controls-on-all-branches). Terraform validation would duplicate the first layer, but security scanners may find threats that did not yet exist when the scanners previously ran (for example, if you deploy to production a couple of days after your deployment to staging).\n* [Overwriting of the build and deploy jobs to set specific rules](#cd-to-review-environments). Note that the `destroy` stage is no longer automated, to prevent overly hasty deletions.\n\nAs explained above, the `TF_STATE_NAME` and `TF_CLI_ARGS_plan` values were provided from the [main pipeline](#the-main-pipeline) to the [child pipeline](#the-child-pipeline). We needed other variable names to pass these values from the [child pipeline](#the-child-pipeline) to here, the [grand-child pipeline](#the-grand-child-pipeline). This is why they are suffixed with `_2` in the child pipeline, and the values are copied back to the appropriate variables in the `before_script` here.\n\nSince we have already broken down each step above, we can zoom out directly to the broad view of the global second layer definition (stored in `.gitlab-ci/.second-layer.gitlab-ci.yml`).\n\n```yml\n# Used to deploy a second environment from both the default branch and the tags.\n\ninclude:\n  template: Terraform.gitlab-ci.yml\n\nstages:\n  - validate\n  - test\n  - build\n  - deploy\n\nfmt:\n  rules:\n    - when: never\n\nvalidate:\n  rules:\n    - when: never\n\nkics-iac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: always\n\n###########################################################################################################\n## QA env. and Prod. 
env\n##  * Manually trigger build and auto-deploy in QA\n##  * Manually trigger both build and deploy in Production\n##  * Destroy of these env. is not automated to prevent errors.\n###########################################################################################################\nbuild:  # terraform plan\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: $TF_STATE_NAME_2\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n  environment:\n    name: $TF_STATE_NAME_2\n    action: prepare\n  before_script:  # Hack to set new variable values on the second layer, while still using the same variable names. Otherwise, due to variable precedence order, setting a new value in the trigger job does not cascade it to the downstream pipeline\n    - TF_STATE_NAME=$TF_STATE_NAME_2\n    - TF_CLI_ARGS_plan=$TF_CLI_ARGS_plan_2\n  rules:\n    - when: manual\n\ndeploy: # terraform apply\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: $TF_STATE_NAME_2\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n  environment: \n    name: $TF_STATE_NAME_2\n    action: start\n  before_script:  # Hack to set new variable values on the second layer, while still using the same variable names. Otherwise, due to variable precedence order, setting a new value in the trigger job does not cascade it to the downstream pipeline\n    - TF_STATE_NAME=$TF_STATE_NAME_2\n    - TF_CLI_ARGS_plan=$TF_CLI_ARGS_plan_2\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG && $TF_AUTO_DEPLOY == \"true\"\n    - if: $CI_COMMIT_TAG\n      when: manual\n###########################################################################################################\n```\n\nEt voilà. **We are ready to go.** Feel free to change the way you control your job executions, leveraging, for example, GitLab's capacity to [delay a job](https://docs.gitlab.com/ee/ci/jobs/job_control.html#run-a-job-after-a-delay) before deploying to production.\n\n## Try it yourself\n\nWe have finally reached our destination. We are now able to control **deployments to five different environments**, with only the **feature branches**, the **main branch**, and **tags**.\n* We are intensively reusing GitLab open source templates to ensure efficiency and security in our pipelines.\n* We are leveraging GitLab template capacities to overwrite only the blocks that need custom control.\n* We have split the pipeline into small chunks, controlling the downstream pipelines to match exactly what we need.\n\nFrom there, the floor is yours. You could, for example, easily update the main pipeline to trigger downstream pipelines for your software source code, with the [trigger:rules:changes](https://docs.gitlab.com/ee/ci/yaml/#ruleschanges) keyword. And use another [template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/) depending on the changes that happened. 
But that is another story.",[110,832,937,480,726],"CD",{"slug":939,"featured":6,"template":678},"using-child-pipelines-to-continuously-deploy-to-five-environments","content:en-us:blog:using-child-pipelines-to-continuously-deploy-to-five-environments.yml","Using Child Pipelines To Continuously Deploy To Five Environments","en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments.yml","en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments",{"_path":945,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":946,"content":952,"config":960,"_id":962,"_type":16,"title":963,"_source":17,"_file":964,"_stem":965,"_extension":20},"/en-us/blog/navigate-application-architecture-drift-and-organizational-alignment",{"title":947,"description":948,"ogTitle":947,"ogDescription":948,"noIndex":6,"ogImage":949,"ogUrl":950,"ogSiteName":692,"ogType":693,"canonicalUrls":950,"schema":951},"Navigate application architecture drift and organizational alignment","Explore how to manage architecture drift by balancing simplification and team realignment using the FINE Analysis. Ensure efficiency and agility throughout an application's lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676151/Blog/Hero%20Images/navigation.jpg","https://about.gitlab.com/blog/navigate-application-architecture-drift-and-organizational-alignment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Navigate application architecture drift and organizational alignment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stephen Walters\"},{\"@type\":\"Person\",\"name\":\"Lee Faus\"}],\n        \"datePublished\": \"2024-09-18\",\n      }",{"title":947,"description":948,"authors":953,"heroImage":949,"date":956,"body":957,"category":14,"tags":958},[954,955],"Stephen Walters","Lee Faus","2024-09-18","Application architecture drift is a common phenomenon in software development as projects evolve and grow in complexity. When this happens, you face a critical decision: Should you simplify the application architecture to fit the current team topologies, or should you adjust your team topologies to match the changing application architecture? This decision is pivotal for maintaining efficiency, innovation, and success throughout an application's lifecycle, which spans years and includes stages from experimentation to production, maintenance, and, ultimately, the end of support.\n\n## What is application architecture drift?\n\nApplication architecture drift occurs when the initial design and structure of an application no longer align with its current state due to continuous improvement, feature additions, and technology advancements. This drift can lead to increased complexity, technical debt, and potential performance bottlenecks if not properly managed.\n\nTo effectively manage such drift, it's essential to consider the teaming topology that supports the application at different levels of application maturity. If your team structure does not accommodate the complexities of the application architecture, you risk a failure on deliverables, which leads to poor customer satisfaction metrics and loss of customer adoption.\n\nWhen team topologies properly adjust to application complexities, poor decisions on implementation details are minimized, resulting in a more scalable and resilient application architecture. 
As Conway's Law states, \"Organizations which design systems are constrained to produce systems which are copies of the communication structures of these organizations.\" This can result in unintended design additions due to organizational setup. In most cases, these additions can lead to weakened team identities, uncertain responsibilities, and poor team interactions and communications.\n\n## The role of team topologies in managing architecture drift\n\n[Team topologies](https://teamtopologies.com/) refer to the roles and responsibilities within a team and how they are organized to deliver value. When an application's architecture changes, it's crucial to assess whether your team structure is still optimal or if adjustments are needed. The FINE Analysis, as defined in the [Value Stream Reference Architecture](https://www.vsmconsortium.org/value-stream-reference-architectures), provides a valuable lens for evaluating and realigning team topologies throughout an application's lifecycle. FINE is defined as:\n\n* F = Flow of work\n* I = Impediments that slow down the flow of work\n* N = Needs that drive the potential for flow to happen\n* E = Effort that is used in the form of cognitive load\n\n### Experimentation phase\n\n- **Architecture:** Simple, flexible, and exploratory\n- **Team topology:** Small, cross-functional, adaptive teams\n- **FINE Analysis:** Stream-aligned teams will have a high Flow of work, with few initial Impediments, but with much fluctuation. Needs will be high, with a heavy reliance on enabling teams to establish standards and templates.\n- **Application architecture drift:** This will be frequent with rapid and constant change, but manageable due to early simplicity and smaller adaptive teams\n\n### Production phase\n\n- **Architecture:** More defined, scalable, and robust\n- **Team topology:** Larger, responsible, perceptible teams\n- **FINE Analysis:** The Flow of work is stabilized, but Impediments start to collect in the form of technical debt, issues, and vulnerabilities. This drives up the Effort required on stream-aligned teams. At this point, enabling teams should have established ways of working, and platform groups should start to reduce the cognitive load on teams. Complicated sub-systems will be defined and should be closely controlled.\n- **Application architecture drift:** This will be frequent with rapid and constant change as before. However, larger teams and a more robust but changing architecture will require higher levels of monitoring and management.\n\n### Maintenance phase\n\n- **Architecture:** Mature, stable, and optimized for efficiency\n- **Team topology:** Sustaining teams\n- **FINE Analysis:** The Needs for stream-aligned teams will reduce and be more dependent on actual customer and business outcomes. The Flow of work is much more impacted by Impediments, in particular any production issues. The Effort on teams can become exhausting if platform groups are not stabilized and effective, and enabling teams have to be responsive to continuous improvement.\n- **Application architecture drift:** Architectural changes will be far less frequent, and team structures will be aligned to ensure system stability. This is dependent upon the stability when exiting the production phase.\n\n### End-of-support phase\n\n- **Architecture:** Legacy, minimal updates, and decommissioning planning\n- **Team topology:** Transition teams\n- **FINE Analysis:** Flow of work is drastically reduced. 
Impediments will move in one of two directions: either reducing as production issues scale down due to lower customer usage, or increasing at high cost due to legacy systems.\n- **Application architecture drift:** Minimal, if any, architectural drift, as teams should be focused on decommissioning over production.\n\n## Balancing simplification and realignment\n\nThe experimentation phase is important to establish the correct disciplines from the outset. The greatest risk of application architecture drift is then in the production phase. In maintenance, this risk is reduced, and, by end of support, should be negligible. So it is during the production phase, potentially the longest-lived phase for any business system, that we must ensure strong discipline to prevent drift.\n\nWhen faced with architecture drift, organizations must decide between simplifying the application architecture to fit existing team topologies or adjusting team topologies to match the evolving architecture. Both approaches have their merits:\n\n- **Simplifying application architecture:** This approach can reduce complexity and technical debt, making it easier for existing teams to manage the application. However, it may limit the application's potential for growth and innovation.\n- **Adjusting team topologies:** Realigning teams to match the evolving architecture can enhance the application's capabilities and performance. This approach requires a more flexible organizational model and may involve retraining or restructuring teams.\n\nA key aspect is to consider **when** to make these adjustments, and the answer is as soon as possible. Leaving adaptations too long can cause the architectural drift to become so large that it inevitably leads to one of two events:\n\n- **Massive re-architecture:** This approach will lead to reduced effort in delivering new customer value, impacting business outcomes. In its own right, it can generate massive technical debt and increased work backlog for future efforts, resulting in increased team cognitive load.\n\n- **Re-organization:** Realigning teams on any kind of large scale will most certainly impact team morale. It can lead to a strain on key people and result in higher churn, especially of innovative talent. This can lead to lost IP knowledge and a future skills shortage, which in turn has an impact on the future quality of the designs and applications produced.\n\n## Next steps\n\nManaging application architecture drift is an ongoing challenge that requires a strategic approach to organizational alignment. By leveraging the FINE Analysis of the Value Stream Reference Architecture and understanding the different phases of an application's lifecycle, you can make informed decisions about team topologies and ensure your organization remains agile and efficient. 
Whether you choose to simplify application architecture or adjust your team structure, the key is to maintain a balance that supports both current needs and future growth.\n\n> [Learn how to manage application value streams](https://about.gitlab.com/solutions/value-stream-management/) with the GitLab DevSecOps platform.\n",[725,959],"design",{"slug":961,"featured":92,"template":678},"navigate-application-architecture-drift-and-organizational-alignment","content:en-us:blog:navigate-application-architecture-drift-and-organizational-alignment.yml","Navigate Application Architecture Drift And Organizational Alignment","en-us/blog/navigate-application-architecture-drift-and-organizational-alignment.yml","en-us/blog/navigate-application-architecture-drift-and-organizational-alignment",{"_path":967,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":968,"content":974,"config":982,"_id":984,"_type":16,"title":985,"_source":17,"_file":986,"_stem":987,"_extension":20},"/en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"title":969,"description":970,"ogTitle":969,"ogDescription":970,"noIndex":6,"ogImage":971,"ogUrl":972,"ogSiteName":692,"ogType":693,"canonicalUrls":972,"schema":973},"Building GitLab with GitLab: A multi-region service to deliver AI features","Discover how we built our first multi-region deployment for teams at GitLab using the platform's many features, helping create a frictionless developer experience for GitLab Duo users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098664/Blog/Hero%20Images/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type_building-gitlab-with-gitlab-no-type.png_1750098663794.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: A multi-region service to deliver AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chance Feick\"},{\"@type\":\"Person\",\"name\":\"Sam Wiskow\"}],\n        \"datePublished\": \"2024-09-12\",\n      }",{"title":969,"description":970,"authors":975,"heroImage":971,"date":978,"body":979,"category":14,"tags":980},[976,977],"Chance Feick","Sam Wiskow","2024-09-12","For GitLab Duo, real-time AI-powered capabilities like [Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) need low-latency response times for a frictionless developer experience. Users don’t want to interrupt their flow and wait for a code suggestion to show up. To ensure GitLab Duo can provide the right suggestion at the right time and meet high performance standards for critical AI infrastructure, GitLab recently launched our first multi-region service to deliver AI features.\n\nIn this article, we will cover the benefits of multi-region services, how we built an internal platform codenamed ‘Runway’ for provisioning and deploying multi-region services using GitLab features, and the lessons learned migrating to multi-region in production.\n\n## Background on the project\n\nRunway is GitLab’s internal platform as a service (PaaS) for provisioning, deploying, and operating containerized services. Runway's purpose is to enable GitLab service owners to self-serve infrastructure needs with production readiness out of the box, so application developers can focus on providing value to customers. 
As part of [our corporate value of dogfooding](https://handbook.gitlab.com/handbook/values/#results), the first iteration was built in 2023 by the Infrastructure department on top of core GitLab capabilities, such as continuous integration/continuous delivery ([CI/CD](https://about.gitlab.com/topics/ci-cd/)), environments, and deployments.\n\nBy establishing automated GitOps best practices, Runway services use infrastructure as code (IaC), merge requests (MRs), and CI/CD by default.\n\nGitLab Duo is primarily powered by [AI Gateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist), a satellite service written in Python outside of GitLab’s modular monolith written in Ruby. In cloud computing, a region is a geographical location of data centers operated by cloud providers.\n\n## Defining a multi-region strategy\n\nDeploying in a single region is a good starting point for most services, but can come with downsides when you are trying to reach a global audience. Users who are geographically far from where your service is deployed may experience different levels of service and responsiveness than those who are closer. This can lead to a poor user experience, even if your service is well built in all other respects.\n\nFor AI Gateway, it was important to meet global customers wherever they are located, whether on GitLab.com or self-managed instances using Cloud Connector. When a developer is deciding to accept or reject a code suggestion, milliseconds matter and can define the user experience.\n\n### Goals\n\nMulti-region deployments require more infrastructure complexity, but for use cases where latency is a core component of the user experience, the benefits often outweigh the downsides. First, multi-region deployments offer increased responsiveness to the user. By serving requests from locations closest to end users, latency can be significantly reduced. Second, multi-region deployments provide greater availability. With fault tolerance, services can fail over during a regional outage. There is a much lower chance of a service failing completely, meaning users should not be interrupted even in partial failures.\n\nBased on our goals for performance and availability, we used this opportunity to create a scalable multi-region strategy in Runway, which is built on GitLab features.\n\n### Architecture\n\nGitLab.com’s SaaS infrastructure is hosted on Google Cloud Platform (GCP). As a result, Runway’s first supported platform runtime is Cloud Run. The initial workloads deployed on Runway are stateless satellite services (e.g., AI Gateway), so Cloud Run services are a good fit and provide a clear migration path to more complex and flexible platform runtimes, e.g., Kubernetes.\n\nBuilding Runway on top of GCP Cloud Run using GitLab has allowed us to iterate and tease out the right level of abstractions for service owners as part of a platform play in the Infrastructure department.\n\nTo serve traffic from multiple regions in Cloud Run, the multi-region deployment strategy must support global load balancing as well as the provisioning and configuration of regional resources. 
Here’s a simplified diagram of the proposed architecture in GCP:\n\n![simplified diagram of the proposed architecture in GCP](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098671/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098671612.png)\n\nBy replicating Cloud Run services across multiple regions and configuring the existing global load balancing with serverless network endpoint group (NEG) backends, we’re able to serve traffic from multiple regions. For the remainder of the article, we’ll focus less on specifics of Cloud Run and more on how we’re building with GitLab.\n\n## Building a multi-region platform with GitLab\n\nNow that you have context about Runway, let's walk through how to build a multi-region platform using GitLab features.\n\n### Provision\n\nWhen building an internal platform, the first challenge is provisioning infrastructure for a service. In Runway, Provisioner is the component that is responsible for maintaining a service inventory and managing IaC for GCP resources using Terraform.\n\nTo provision a service, an application developer will open an MR to add a service project to the inventory using Git, and Provisioner will create required resources, such as service accounts and identity and access management policies. When building this functionality with GitLab, Runway leverages [OpenID Connect (OIDC) with GCP Workload Identity Federation](https://docs.gitlab.com/ee/ci/cloud\\_services/google\\_cloud/) for managing IaC.\n\nAdditionally, Provisioner will create a deployment project for each service project. The purpose of creating separate projects for deployments is to ensure the [principle of least privilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/) by authenticating as a GCP service account with restricted permissions. Runway leverages the [Projects API](https://docs.gitlab.com/ee/api/projects.html) for creating projects with the [Terraform provider](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs).\n\nFinally, Provisioner defines variables in the deployment project for the service account, so that deployment CI jobs can authenticate to GCP. Runway leverages [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and the [Job Token allowlist](https://docs.gitlab.com/ee/ci/jobs/ci\\_job\\_token.html\\#add-a-group-or-project-to-the-job-token-allowlist) to handle authentication and authorization.\n\nHere’s a simplified example of provisioning a multi-region service in the service inventory:\n\n```\n{\n  \"inventory\": [\n    {\n      \"name\": \"example-service\",\n      \"project_id\": 46267196,\n      \"regions\": [\n        \"europe-west1\",\n        \"us-east1\",\n        \"us-west1\"\n      ]\n    }\n  ]\n}\n```\n\nOnce provisioned, a deployment project and necessary infrastructure will be created for a service.
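\n\nAs a rough illustration of the authentication piece (a hypothetical sketch, not Runway’s actual configuration; PROJECT_NUMBER, POOL_ID, PROVIDER_ID, and the WIF_PROVIDER variable are placeholders), a CI job can exchange a GitLab-issued OIDC ID token for short-lived GCP credentials via Workload Identity Federation:\n\n```\n# Hypothetical job; all identifiers below are placeholders.\nterraform-plan:\n  id_tokens:\n    GITLAB_OIDC_TOKEN:\n      aud: https://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID\n  script:\n    # Write the ID token to a file and generate a GCP credential configuration from it.\n    - echo \"$GITLAB_OIDC_TOKEN\" > .ci_job_jwt_file\n    - gcloud iam workload-identity-pools create-cred-config \"$WIF_PROVIDER\" --output-file=.gcp_cred_config.json --credential-source-file=.ci_job_jwt_file\n    - export GOOGLE_APPLICATION_CREDENTIALS=.gcp_cred_config.json\n    - terraform plan\n```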
\n\n### Configure\n\nAfter a service is provisioned, the next challenge is configuring the service. In Runway, [Reconciler](https://gitlab.com/gitlab-com/gl-infra/platform/runway/runwayctl) is the component responsible for configuring and deploying services by aligning the actual state with the desired state, using Golang and Terraform.\n\nHere’s a simplified example of an application developer configuring GitLab CI/CD in their service project:\n\n```\n# .gitlab-ci.yml\nstages:\n  - validate\n  - runway_staging\n  - runway_production\n\ninclude:\n  - project: 'gitlab-com/gl-infra/platform/runway/runwayctl'\n    file: 'ci-tasks/service-project/runway.yml'\n    inputs:\n      runway_service_id: example-service\n      image: \"$CI_REGISTRY_IMAGE/${CI_PROJECT_NAME}:${CI_COMMIT_SHORT_SHA}\"\n      runway_version: v3.22.0\n\n# omitted for brevity\n```\n\nRunway provides sane default values for configuration that are based on our experience in delivering stable and reliable features to customers. Additionally, service owners can configure infrastructure using a service manifest file hosted in a service project. The service manifest uses JSON Schema for validation. When building this functionality with GitLab, Runway leverages [Pages](https://docs.gitlab.com/ee/user/project/pages/) for schema documentation.\n\nTo deliver this part of the platform, Runway leverages [CI/CD templates](https://docs.gitlab.com/ee/development/cicd/templates.html), [Releases](https://docs.gitlab.com/ee/user/project/releases/), and [Container Registry](https://docs.gitlab.com/ee/user/packages/container\\_registry/) for integrating with service projects.\n\nHere’s a simplified example of a service manifest:\n\n```\n# .runway/runway-production.yml\napiVersion: runway/v1\nkind: RunwayService\nspec:\n  container_port: 8181\n  regions:\n    - us-east1\n    - us-west1\n    - europe-west1\n\n# omitted for brevity\n```\n\nFor multi-region services, Runway injects an environment variable into the container instance runtime, e.g., RUNWAY\\_REGION, so application developers have the context to make any downstream dependencies regionally aware, e.g., the Vertex AI API.\n\nOnce configured, a service project will be integrated with a deployment project.\n\n### Deploy\n\nAfter a service project is configured, the next challenge is deploying a service. In Runway, Reconciler handles this by triggering a deployment job in the deployment project when an MR is merged to the main branch. When building this functionality with GitLab, Runway leverages [Trigger Pipelines](https://docs.gitlab.com/ee/ci/triggers/) and [Multi-Project Pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream\\_pipelines.html\\#multi-project-pipelines) to trigger jobs from the service project to the deployment project.\n\n![trigger jobs from service project to deployment project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098671612.png)\n\nOnce a pipeline is running in a deployment project, the service will be deployed to an environment. By default, Runway will provision staging and production environments for all services. At this point, Reconciler will apply any Terraform resource changes for infrastructure. 
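\n\nAs a rough sketch of what this can look like in the deployment project (a hypothetical job, not Runway’s actual configuration; the job name, stage, and URL are illustrative), a deployment job ties the Terraform run to a GitLab environment:\n\n```\n# Hypothetical deployment job in the deployment project\ndeploy-production:\n  stage: deploy\n  environment:\n    name: production\n    url: https://example-service.example.com\n  script:\n    - terraform init    # state can live in GitLab-managed Terraform state\n    - terraform apply -auto-approve\n```\n\n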
When building this functionality with GitLab, Runway leverages [Environments/Deployments](https://docs.gitlab.com/ee/ci/environments/) and [GitLab-managed Terraform state](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform\\_state.html) for each service.\n\n![Reconciler applies any Terraform resource changes for infrastructure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098671614.png)\n\nRunway provides default application metrics for services. Additionally, custom metrics can be used by enabling a sidecar container with OpenTelemetry Collector configured to scrape Prometheus metrics and remote-write them to Mimir. By providing observability out of the box, Runway is able to bake monitoring into CI/CD pipelines.\n\nExample scenarios include gradual rollouts for blue/green deployments, preventing promotions to production when staging is broken, or automatically rolling back to the previous revision when elevated error rates occur in production.\n\n![Runway bakes monitoring into CI/CD pipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098671615.png)\n\nOnce deployed, environments will serve the latest revision of a service. At this point, you should have a good understanding of some of the challenges that will be encountered, and how to solve them with GitLab features.\n\n## Migrating to multi-region in production\n\nAfter extending Runway components to support multi-region in Cloud Run, the final challenge was migrating from AI Gateway’s single-region deployment in production with zero downtime. Today, teams using Runway to deploy their services can self-serve regions, making a multi-region deployment just as simple as a single-region deployment. \n\nWe were able to iterate on building multi-region functionality without impacting existing infrastructure by using semantic versioning for Runway. Next, we’ll share some learnings from the migration that may inform how to operate services for an internal multi-region platform.\n\n### Dry run deployments\n\nIn Runway, Reconciler will apply Terraform changes in CI/CD. The trade-off is that plans cannot be verified in advance, which could risk inadvertently destroying or misconfiguring production infrastructure. To solve this problem, Runway will perform a “dry run” deployment for MRs.\n\n![\"Dry run\" deployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098671616.png)\n\nFor migrating AI Gateway, dry run deployments increased confidence and helped mitigate the risk of downtime during rollout. When building an internal platform with GitLab, we recommend supporting dry run deployments from the start.\n\n### Regional observability\n\nIn Runway, existing observability was aggregated under the assumption of a single-region deployment. To solve this problem, Runway observability was retrofitted to include a new region label for Prometheus metrics.\n\nOnce metrics were retrofitted, we were able to introduce service level indicators (SLIs) for both regional Cloud Run services and global load balancing. 
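\n\nFor example (a hypothetical sketch; the metric and label names are illustrative, not Runway’s actual metrics), a per-region error-ratio SLI can be expressed as a Prometheus recording rule:\n\n```\n# Hypothetical recording rule; assumes requests carry a region label\ngroups:\n  - name: runway-regional-slis\n    rules:\n      - record: runway:http_error_ratio:rate5m\n        expr: |\n          sum by (region) (rate(http_requests_total{code=~\"5..\"}[5m]))\n          /\n          sum by (region) (rate(http_requests_total[5m]))\n```\n\n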
Here’s an example dashboard screenshot for a general Runway service:\n\n![dashboard screenshot for a general Runway service](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098671617.png)\n\n***Note:** Data is not actual production data and is only for illustration purposes.*\n\nAdditionally, we were able to update our service level objectives (SLOs) to support regions. As a result, service owners could be alerted when a specific region experiences an elevated error rate or an increase in response times.\n\n![screenshot of alerts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098671617.png)\n\n***Note:** Data is not actual production data and is only for illustration purposes.*\n\nFor migrating AI Gateway, regional observability increased confidence and helped provide more visibility into new infrastructure. When building an internal platform with GitLab, we recommend supporting regional observability from the start.\n\n### Self-service regions\n\nThe Infrastructure department successfully performed the initial migration of multi-region support for AI Gateway in production with zero downtime. Given the risk associated with rolling out a large infrastructure migration, it was important to ensure the service continued working as expected.\n\nShortly afterwards, service owners began self-serving additional regions to meet customer growth. At the time of writing, [GitLab Duo](https://about.gitlab.com/gitlab-duo/) is available in six regions around the globe and counting. Service owners are able to configure the desired regions, and Runway will provide guardrails along the way in a scalable solution.\n\nAdditionally, three other internal services have already started using multi-region functionality on Runway. Application developers have self-served this functionality entirely, which validates that we’ve provided a good platform experience for service owners. For a platform play, a scalable solution like Runway is considered a good outcome since the Infrastructure department is no longer a blocker.\n\n## What’s next for Runway\n\nBased on how quickly we could iterate to provide results for customers, the SaaS Platforms department has continued to invest in Runway. We’ve grown the Runway team with additional contributors, started evolving the platform runtime (e.g., 
Google Kubernetes Engine), and continue dogfooding with tighter integration in the product.\n\nIf you’re interested in learning more, feel free to check out [https://gitlab.com/gitlab-com/gl-infra/platform/runway](https://gitlab.com/gitlab-com/gl-infra/platform/runway).\n\n## More Building GitLab with GitLab\n- [Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n- [Stress-testing Product Analytics](https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics/)\n- [Web API Fuzz Testing](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n- [How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n- [Expanding our security certification portfolio](https://about.gitlab.com/blog/building-gitlab-with-gitlab-expanding-our-security-certification-portfolio/)\n",[110,937,832,915,726,704,728,702,725,981],"AI/ML",{"slug":983,"featured":92,"template":678},"building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","content:en-us:blog:building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","Building Gitlab With Gitlab A Multi Region Service To Deliver Ai Features","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"_path":989,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":990,"content":996,"config":1003,"_id":1005,"_type":16,"title":1006,"_source":17,"_file":1007,"_stem":1008,"_extension":20},"/en-us/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes",{"title":991,"description":992,"ogTitle":991,"ogDescription":992,"noIndex":6,"ogImage":993,"ogUrl":994,"ogSiteName":692,"ogType":693,"canonicalUrls":994,"schema":995},"How to stream logs through the GitLab Dashboard for Kubernetes","In GitLab 17.2, users can now view Kubernetes pod and container logs directly via the GitLab UI. This tutorial shows how to use this new feature to simplify monitoring Kubernetes infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662245/Blog/Hero%20Images/blog-image-template-1800x945__16_.png","https://about.gitlab.com/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to stream logs through the GitLab Dashboard for Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Helfand\"}],\n        \"datePublished\": \"2024-08-19\",\n      }",{"title":991,"description":992,"authors":997,"heroImage":993,"date":999,"body":1000,"category":14,"tags":1001},[998],"Daniel Helfand","2024-08-19","Developers are context-switching more frequently, needing to understand and use multiple tools to accomplish complex tasks. These tools all have different user experiences and often do not present all the information needed to successfully develop, troubleshoot, and ship critical features. 
It is challenging enough to release and monitor software changes without also needing to understand so many tools.\n\nWith the addition of [pod log streaming through the GitLab Dashboard for Kubernetes in v17.2](https://about.gitlab.com/releases/2024/07/18/gitlab-17-2-released/#log-streaming-for-kubernetes-pods-and-containers), developers can go straight from a merge request review to watching a deployment roll out to Kubernetes. This new feature will:\n- allow developers to avoid switching tooling\n- ease the process of troubleshooting and monitoring deployments and post-deployment application health\n- strengthen [GitOps workflows](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html) to easily manage application and infrastructure changes\n\nThe new feature allows GitLab users to view the logs of pods and containers directly via the GitLab UI. In previous versions of GitLab, users could configure a GitLab project to view pods deployed to certain namespaces on an associated cluster. Now, users can further monitor workloads running on Kubernetes without needing to switch to another tool.\n\nIn the sections below, you will learn how to use this new feature by adding a Kubernetes cluster to a GitLab project, deploying a sample workload to a cluster, and viewing the logs of this workload running on a cluster. \n\n> Need to know the basics of Kubernetes? [Read this quick introductory blog](https://about.gitlab.com/blog/kubernetes-the-container-orchestration-solution/).\n\n## Configure a GitLab project to view Kubernetes resources\n\nBefore proceeding with this section, the following prerequisites are required:\n* a remote Kubernetes cluster (i.e., not running locally on your machine)\n* access to a GitLab v17.2 account\n* [this repository](https://gitlab.com/gitlab-da/tutorials/cloud-native/gitlab-k8s-log-streaming-example) forked to a GitLab group to which you have access\n* Helm CLI\n* kubectl CLI\n\nOnce you have satisfied the prerequisites, add an agent configuration file to the GitLab project you forked. The configuration file allows users to control permissions around how GitLab users may interact with the associated Kubernetes cluster.\n\nYou can use the configuration file included in this GitLab project by changing the following file: `.gitlab/agents/k8s-agent/config.yaml`. Replace the `\u003CGitLab group>` in the `id` property shown below with the group where you have forked the example project. This config file will allow [GitLab to access your cluster via an agent](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html) that can be installed on your cluster.\n\n```yaml\nuser_access:\n  access_as:\n    agent: {}\n  projects:\n    - id: \u003CGitLab group>/gitlab-k8s-log-streaming-example\n```\n\nOnce the above file is edited, you can commit and push these changes to the main branch of the project. \n\n## Add GitLab Kubernetes agent to cluster\n\nWith the agent configuration file added, now add the cluster to GitLab by installing an agent on your cluster. In the GitLab UI, go to your project and, on the left side of the screen, select **Operate > Kubernetes clusters**. Once on this page, select the **Connect a cluster** button on the right side of the screen. From the dropdown menu, you can then select the agent, which should be `k8s-agent`. 
Click **Register** to get instructions for how to install the agent on your cluster.\n\nAfter you register the agent, you will be presented with instructions to run a helm command that installs the GitLab agent on your cluster. Before running the command locally, you will want to ensure your Kubernetes context is targeting the cluster you want to work with. Once you have verified you are using the correct kubeconfig locally, you can run the helm command to install the agent on your cluster.\n\nRun the following command to wait for the pods to start up; once both pods are running, GitLab should be able to connect to the agent:\n\n```shell\nkubectl get pods -n gitlab-agent-k8s-agent -w\n```\n\n## Deploy sample application to your cluster\n\nBefore you can view logs of a workload through GitLab, you first need to have something running on your cluster. To do this, you can run the following kubectl command locally:\n\n```shell\nkubectl apply -f https://gitlab.com/gitlab-da/tutorials/cloud-native/gitlab-k8s-log-streaming-example/-/raw/main/k8s-manifests/k8s.yaml\n```\n\nAfter the command runs successfully, you are ready to complete the final step to set up a Kubernetes dashboard via GitLab.\n\n## View pod logs through the GitLab UI\n\nTo add the Kubernetes dashboard via the GitLab UI, go to your project and, on the left side of the screen, select **Operate > Environments**. On the top right side of the screen, select the **Create an environment** button.\n\nNext, you can give your environment a name, select the GitLab agent (i.e., `k8s-agent`), and pick a namespace for the Kubernetes dashboard to focus on. Since the application is running in the `gitlab-k8s-log-streaming-example-dev` namespace, select this option from the namespace dropdown. After naming the environment and selecting the agent and namespace, click **Save**.\n\nAfter creating the environment, you should now see information about the application’s pods displayed via the GitLab UI.\n\n![Kubernetes logs - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676402/Blog/Content%20Images/Screenshot_2024-08-20_at_12.15.08_PM.png)\n\nGo to the right side of the screen and click **View Logs** to see logs for one of the pods associated with the application. \n\n![Kubernetes dashboard - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676402/Blog/Content%20Images/Screenshot_2024-08-20_at_12.16.56_PM.png)\n\n## Try it out and share feedback\n\nThe introduction of pod log streaming in GitLab v17.2 will help GitLab users get one step closer to managing complex deployments to Kubernetes, as well as monitoring and troubleshooting issues post-deployment via a common user experience. We are excited to hear more about users’ experiences with this new enhancement and how it helps improve DevOps workflows around Kubernetes. To share your experience with us, you can open an issue in the [project associated with this tutorial](https://gitlab.com/gitlab-da/tutorials/cloud-native/gitlab-k8s-log-streaming-example). 
Or, [comment directly in the Kubernetes log streaming feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/478379) to report information to the GitLab engineering team.\n\nMore information on getting started with the GitLab Dashboard for Kubernetes can be found in [the documentation](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html).\n\n> To explore the GitLab Dashboard for Kubernetes as well as other more advanced features of GitLab, sign up for [our free 30-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n",[749,535,1002,726],"kubernetes",{"slug":1004,"featured":92,"template":678},"how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes","content:en-us:blog:how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes.yml","How To Stream Logs Through The Gitlab Dashboard For Kubernetes","en-us/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes.yml","en-us/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes",{"_path":1010,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1011,"content":1017,"config":1024,"_id":1026,"_type":16,"title":1027,"_source":17,"_file":1028,"_stem":1029,"_extension":20},"/en-us/blog/faq-gitlab-ci-cd-catalog",{"title":1012,"description":1013,"ogTitle":1012,"ogDescription":1013,"noIndex":6,"ogImage":1014,"ogUrl":1015,"ogSiteName":692,"ogType":693,"canonicalUrls":1015,"schema":1016},"FAQ: GitLab CI/CD Catalog","Unlock the full potential of the CI/CD Catalog with expert tips and answers to common questions, including how to create and share components.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098783/Blog/Hero%20Images/Blog/Hero%20Images/cicdcover_5vLe737i4QfvAqv6PnqUaR_1750098782745.png","https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"FAQ: GitLab CI/CD Catalog\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"},{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2024-08-01\",\n      }",{"title":1012,"description":1013,"authors":1018,"heroImage":1014,"date":1021,"body":1022,"category":14,"tags":1023},[1019,1020],"Itzik Gan Baruch","Dov Hershkovitch","2024-08-01","The [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/), part of the DevSecOps platform, allows users to discover, reuse, and contribute [CI/CD](https://about.gitlab.com/topics/ci-cd/) components to make software development more efficient and productive. Recently, we hosted a CI/CD Catalog webinar that surfaced a host of helpful questions. This FAQ features some of those questions (and answers) and highlights the CI/CD Catalog's capabilities as well as best practices for using it in your environment.\n\n***When will the CI catalog components and inputs be available on GitLab.com?***\n\nThe [CI catalog components and inputs became generally available](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) starting with GitLab 17.0 (on GitLab.com and self-managed).\n\n***What about versioning components? Often a pipeline is coupled with the code, and we want a way to re-run a release pipeline from an older version of the code. 
Do we have options to version components similarly to how we version the application?***\n\nWe have full support for version control – at any given time you can use any earlier version.\n\n***Can we have composite components that use multiple other components?***\n\nAbsolutely! Here is an example of a deploy component that uses a validate component.\n\n![example of a deploy component that uses validate component](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098788/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098788135.png)\n\n***What are the options for testing components?***\n\nThere are several methods of testing components. The first method is mentioned in [the documentation](https://docs.gitlab.com/ee/ci/components/examples.html#test-a-component): By including a component using `$CI_COMMIT_SHA` (instead of a version), you can test your component on every single commit. Another strategy is to use [child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#parent-child-pipelines), which allows you to test a component with different input parameters. More details can be found in the [GitLab forum](https://forum.gitlab.com/t/ci-cd-component-testing-strategies/102983/2?u=leetickett-gitlab).\n\n***Can the component reference URL use a branch name as the version, similar to how docs show a tag (e.g., $CI_SERVER_FQDN/my-org/security-components/secret-detection@master)?***\n\nYes, you can use a branch name. [The CI/CD Catalog documentation](https://docs.gitlab.com/ee/ci/components/#component-versions) lists component versions.\n\n***How can you show the catalog in self-managed instances?***\n\nA self-managed catalog will be available, but will be empty without any published components. You can use this catalog internally in your organization, and it is up to you and your teams to populate it with the appropriate components. Alternatively, you can mirror existing component projects from GitLab.com to your self-managed instance.\n\n***Can we clone the public repo into a self-hosted instance?***\n\nA component is hosted in a GitLab project and, like any other project, it can be cloned locally. Follow these instructions on [how to mirror a component from GitLab.com to a self-managed instance](https://docs.gitlab.com/ee/ci/components/#use-a-gitlabcom-component-in-a-self-managed-instance).\n\n***How can you prevent name collisions with CI/CD component jobs?***\n\nUse inputs to specify dynamic job names, which will allow you to [include the same component multiple times in the same pipeline](https://docs.gitlab.com/ee/ci/yaml/inputs.html#include-the-same-file-multiple-times).\n\n***Is it possible to inspect the source code of components in the catalog?***\n\nYes, to view the source code, from the catalog open a component you would like to view. Then, click the component name – this will open the project where the component is hosted, and you can find the component’s .yml file in the component's templates folder.\n\n***Can a component receive an array of data as an input parameter?***\n\n[A component can receive multiple data types](https://docs.gitlab.com/ee/ci/yaml/?query=inputs#specinputstype) such as string, boolean, number, and array.\n\n***Can the component reference more files alongside the .yml file?***\n\nNo, it can’t. 
This capability is available in [CI Steps](https://docs.gitlab.com/ee/ci/steps/) (which is experimental).\n\n***Can we have anti-patterns for CI/CD components?***\n\nPlease [follow the best practice section in the documentation](https://docs.gitlab.com/ee/ci/components/#write-a-component).\n\n***Is it possible to limit a group to only using components owned by the group (i.e., not allowing community components)?***\n\nNot yet, but [this feature is on our roadmap](https://gitlab.com/gitlab-org/gitlab/-/issues/441102).\n\n***Is the GitLab CI Steps feature related to this component in any way?***\n\nYes, it is; we consider CI Steps another type of component. More details can be found in [the CI Steps documentation](https://docs.gitlab.com/ee/ci/steps).\n\n***Is it possible to make private components for your organization only?***\n\nYes, the [component's visibility](https://docs.gitlab.com/ee/ci/components/#view-the-cicd-catalog) is based on the visibility level of your project, and only members that have the privileges to see the project can view and search the component in the catalog.\n\n***What is the best approach if I need to fork a GitLab.com component in terms of GitLab flow to manage the forked repo and propose changes when needed to the original repo?***\n\nYou can manage your fork similarly to how you manage any Git repository – by making changes in your fork and then creating merge requests to propose changes back to the original repository.\n\n***Is there any difference in source code standardization between a verified creator and a non-verified creator in the catalog? Do verified creators have to follow a higher standard?***\n\nCurrently, there is no process to verify and approve individual creators from our extended community. However, we do have a [process for GitLab partners and GitLab-maintained components](https://docs.gitlab.com/ee/ci/components/#verified-component-creators).\n\n***How would you recommend implementing tools like Fortify SCA into your CI/CD pipeline?***\n\nTwo options would be possible: Either Fortify would need to create a shared component in the catalog that exposes the necessary elements for public consumption, or, if publicly available APIs exist, the community can build an open-source component to be shared and used by others in the catalog.\n\n***What sort of patterns do you recommend for providing \"outputs\" from components that are consumed by other jobs/components in the including pipeline?***\n\nThere is no ability to specify outputs for components, but this is on the roadmap with a new capability called [CI Steps](https://docs.gitlab.com/ee/ci/steps/).\n\n***Is there any plan to label components?***\n\nYes! In this [GitLab epic](https://gitlab.com/groups/gitlab-org/-/epics/11917), we have several issues to enhance searching and discoverability by content type, tags, and category.\n\n***Will existing CI/CD templates be migrated to components?***\n\nYes, the GitLab templates are migrated and have a special badge in the CI/CD Catalog.\n\n***What's the recommended way to transition from our existing GitLab pipeline templates to GitLab catalog components?***\n\nThis should be rather simple since components are very similar to templates. We would recommend starting to use inputs in your templates, and later moving them to the appropriate folder structure.
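\n\nAs a rough sketch of that first step (a hypothetical component; the file, input, and job names are illustrative), a template job rewritten with `spec:inputs` might look like this:\n\n```yaml\n# templates/build.yml in a hypothetical component project\nspec:\n  inputs:\n    job-name:\n      default: build-app\n    stage:\n      default: build\n---\n# The job name comes from an input, which also avoids name collisions.\n\"$[[ inputs.job-name ]]\":\n  stage: $[[ inputs.stage ]]\n  script:\n    - echo \"Building...\"\n```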
\n\n> Learn more about the CI/CD Catalog and components:\n>  \n> - [CI/CD Catalog goes GA: No more building pipelines from scratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n> \n> - [A CI/CD component builder's journey](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n>\n> - [Documentation: CI/CD components and CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/)\n>\n> - [Introducing CI/CD components and how to use them in GitLab](https://about.gitlab.com/blog/introducing-ci-components/)\n>\n",[110,725],{"slug":1025,"featured":92,"template":678},"faq-gitlab-ci-cd-catalog","content:en-us:blog:faq-gitlab-ci-cd-catalog.yml","Faq Gitlab Ci Cd Catalog","en-us/blog/faq-gitlab-ci-cd-catalog.yml","en-us/blog/faq-gitlab-ci-cd-catalog",{"_path":1031,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1032,"content":1038,"config":1044,"_id":1046,"_type":16,"title":1047,"_source":17,"_file":1048,"_stem":1049,"_extension":20},"/en-us/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way",{"title":1033,"description":1034,"ogTitle":1033,"ogDescription":1034,"noIndex":6,"ogImage":1035,"ogUrl":1036,"ogSiteName":692,"ogType":693,"canonicalUrls":1036,"schema":1037},"Building a GitLab CI/CD pipeline for a monorepo the easy way","Learn how to create a GitLab CI/CD pipeline for a monorepo to host multiple applications in one repository.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660151/Blog/Hero%20Images/blog-image-template-1800x945__26_.png","https://about.gitlab.com/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building a GitLab CI/CD pipeline for a monorepo the easy way\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Morris\"}],\n        \"datePublished\": \"2024-07-30\",\n      }",{"title":1033,"description":1034,"authors":1039,"heroImage":1035,"date":1041,"body":1042,"category":14,"tags":1043},[1040],"Sam Morris","2024-07-30","Monorepos allow you to host multiple applications’ code in a single repository. In GitLab, that involves placing disparate application source code in separate directories in one project. While this strategy allows for version-controlled storage of your code, it was tricky leveraging the full power of GitLab’s [CI/CD](https://about.gitlab.com/topics/ci-cd/) pipeline capabilities… until now!\n\n## The ideal case: CI/CD in a monorepo\n\nSince you have more than one application’s code living in your repository, you will want to have more than one pipeline configuration. For example, if you have a .NET application and a Spring application in one project, each application may have different build and test jobs to complete. Ideally, you can completely decouple the pipelines and only run each pipeline based on changes to that specific application’s source code.\n\nThe technical approach for this would be to have a project-level `.gitlab-ci.yml` pipeline configuration file that includes a specific YAML file based on changes in a certain directory. 
The `.gitlab-ci.yml` pipeline serves as the control plane that triggers the appropriate pipeline based on the changes made to the code.\n\n## The legacy approach\n\nPrior to GitLab 16.4, we were not able to include a YAML file based on changes to a directory or file in a project. However, we could accomplish this functionality via a workaround. \n\nIn our monorepo project, we have two directories for different applications. In this example, there are `java` and `python` directories representing a Java and Python app, respectively. Each directory has an application-specific YAML file to build each app. In the project’s pipeline file, we simply include both application pipeline files, and do the logic handling in those files directly.\n\n`.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\ntop-level-job:\n  stage: build\n  script:\n    - echo \"Hello world...\"\n\ninclude:\n  - local: '/java/j.gitlab-ci.yml'\n  - local: '/python/py.gitlab-ci.yml'\n\n```\n\nIn each application-specific pipeline file, we create a hidden job named `.java-common` or `.python-common` that only runs if there are changes to that app’s directory. [Hidden jobs](https://docs.gitlab.com/ee/ci/jobs/#hide-jobs) do not run by default, and are often utilized to reuse specific job configurations. Each pipeline extends that hidden job to inherit the rules defining which files to watch for changes, which would then initiate the pipeline job. \n\n`j.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\n.java-common:\n  rules:\n    - changes:\n      - 'java/*'\n\njava-build-job:\n  extends: .java-common\n  stage: build\n  script:\n    - echo \"Building Java\"\n\njava-test-job:\n  extends: .java-common\n  stage: test\n  script:\n    - echo \"Testing Java\"\n\n```\n\n`py.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\n.python-common:\n  rules:\n    - changes:\n      - 'python/*'\n\npython-build-job:\n  extends: .python-common\n  stage: build\n  script:\n    - echo \"Building Python\"\n\npython-test-job:\n  extends: .python-common\n  stage: test\n  script:\n    - echo \"Testing Python\"\n\n```\n\nThere are some downsides to this, including having to extend the hidden job in every other job in the YAML file to ensure each complies with the rules, which creates a lot of redundant code and room for human error. Additionally, extended jobs cannot have duplicate keys, so you could not define your own `rules` logic in each job since there would be a collision in the keys and their [values are not merged](https://docs.gitlab.com/ee/ci/yaml/index.html#extends). \n\nThe result is a pipeline that runs the `j.gitlab-ci.yml` jobs when `java/` is updated, and the `py.gitlab-ci.yml` jobs when `python/` is updated. \n\n## The new approach: Conditionally include pipeline files\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6phvk8jioAo?si=y6ztZODvUtM-cHmZ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIn GitLab 16.4, we introduced [`include` with `rules:changes` for pipelines](https://docs.gitlab.com/ee/ci/yaml/includes.html#include-with-ruleschanges). Previously, you could `include` with `rules:if`, but not `rules:changes`, making this update extremely powerful. Now, you can simply use the `include` keyword and define the monorepo rules in your project pipeline configuration. 
\n\nNew `.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n\ntop-level-job:\n  stage: build\n  script:\n    - echo \"Hello world...\"\n\ninclude:\n  - local: '/java/j.gitlab-ci.yml'\n    rules:\n      - changes:\n        - 'java/*'\n  - local: '/python/py.gitlab-ci.yml'\n    rules:\n      - changes:\n        - 'python/*'\n\n```\n\nThen each application’s YAML can just focus on building and testing that application’s code, without extending a hidden job repeatedly. This allows for more flexibility in job definitions and reduces code rewriting for engineers.\n\nNew `j.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\njava-build-job:\n  stage: build\n  script:\n    - echo \"Building Java\"\n\njava-test-job:\n  stage: test\n  script:\n    - echo \"Testing Java\"\n\n```\n\nNew `py.gitlab-ci.yml`:\n```\nstages:\n  - build\n  - test\n  - deploy\n\npython-build-job:\n  stage: build\n  script:\n    - echo \"Building Python\"\n\npython-test-job:\n  stage: test\n  script:\n    - echo \"Testing Python\"\n\n```\n\nThis accomplishes the same task of including the Java and Python jobs only when their directories are modified. Something to consider in your implementation is that [jobs can run unexpectedly when using `changes`](https://docs.gitlab.com/ee/ci/jobs/job_troubleshooting.html#jobs-or-pipelines-run-unexpectedly-when-using-changes). The `changes` rule always evaluates to true when pushing a new branch or a new tag to GitLab, so all included jobs will run upon the first push to a branch regardless of the `rules:changes` definition. You can mitigate this by creating your feature branch first and then opening a merge request to begin your development, since only the first push to the newly created branch will force all jobs to run.\n\nUltimately, monorepos are a strategy that can be used with GitLab CI/CD, and, with the new `include` with `rules:changes` feature, we now have a better approach for using GitLab CI with monorepos. To get started with monorepos, start a free GitLab Ultimate trial today.\n\n## More CI/CD resources\n\n* [5 tips for managing monorepos in GitLab](https://about.gitlab.com/blog/tips-for-managing-monorepos-in-gitlab/)\n* [How to learn CI/CD fast](https://about.gitlab.com/blog/how-to-learn-ci-cd-fast/)",[110,726],{"slug":1045,"featured":6,"template":678},"building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way","content:en-us:blog:building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way.yml","Building A Gitlab Ci Cd Pipeline For A Monorepo The Easy Way","en-us/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way.yml","en-us/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way",{"_path":1051,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1052,"content":1058,"config":1064,"_id":1066,"_type":16,"title":1067,"_source":17,"_file":1068,"_stem":1069,"_extension":20},"/en-us/blog/kubernetes-overview-operate-cluster-data-on-the-frontend",{"title":1053,"description":1054,"ogTitle":1053,"ogDescription":1054,"noIndex":6,"ogImage":1055,"ogUrl":1056,"ogSiteName":692,"ogType":693,"canonicalUrls":1056,"schema":1057},"Kubernetes overview: Operate cluster data on the frontend","GitLab offers a built-in solution for monitoring your Kubernetes cluster health. 
Learn more about the technical design and functionality with this detailed guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099045/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2816%29_3L7ZP4GxJrShu6qImuS4Wo_1750099045397.png","https://about.gitlab.com/blog/kubernetes-overview-operate-cluster-data-on-the-frontend","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kubernetes overview: Operate cluster data on the frontend\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Anna Vovchenko\"}],\n        \"datePublished\": \"2024-06-20\",\n      }",{"title":1053,"description":1054,"authors":1059,"heroImage":1055,"date":1061,"body":1062,"category":14,"tags":1063},[1060],"Anna Vovchenko","2024-06-20","Accessing real-time cluster information is crucial for verifying successful software deployments and initiating troubleshooting processes. In this article, you'll learn about GitLab's enhanced Kubernetes integration, including how to leverage the Watch API for real-time insights into deployment statuses and streamlined troubleshooting capabilities. \n\n## What are GitLab's Kubernetes resources?\n\nGitLab offers a dedicated [dashboard for Kubernetes](https://gitlab.com/groups/gitlab-org/-/epics/2493 \"Visualize the cluster state in GitLab\") to understand the status of connected clusters with an intuitive visual interface. It is integrated into the Environment Details page and shows resources relevant to the environment. Currently, three types of Kubernetes resources are available:\n\n- pods filtered by the Kubernetes namespace\n- services\n- Flux resource ([HelmRelease](https://fluxcd.io/flux/components/helm/helmreleases/) or [Kustomization](https://fluxcd.io/flux/components/kustomize/kustomizations/))\n\nFor these resources, we provide general information, such as name, status, namespace, age, etc. It is represented similarly to what the [kubectl](https://kubernetes.io/docs/reference/kubectl/) command would show when run against the Kubernetes cluster. More details can be found when clicking each resource: The side drawer shows the list of labels, annotations, and detailed status and spec information presented as read-only YAML code blocks.\n\nThe information provided helps to visualize the cluster state, spot any issues, and debug problematic deployments right away.\n\n## Frontend to cluster communication: The GitLab solution\n\nWe have developed a range of tools and solutions to enable a seamless connection and management of Kubernetes clusters within GitLab. One of the core components of this system is the [GitLab agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/install/). This powerful tool provides a secure bidirectional connection between a GitLab instance and a Kubernetes cluster. It is composed of two main components: **agentk** and **KAS** (Kubernetes agent server).\n\n![Kubernetes flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099055229.png)\n\nagentk is a lightweight cluster-side component. It is responsible for establishing a connection to a KAS instance and waiting for requests to process. It proxies requests from KAS to the Kubernetes API. It may also actively send information about cluster events to KAS.\n\nWhile agentk is actively communicating with the cluster, KAS represents a GitLab server-side component. 
It is responsible for:\n\n- accepting requests from agentk\n- authenticating agentk requests by querying the GitLab backend\n- fetching the agent's configuration from a corresponding Git repository using Gitaly\n- polling manifest repositories for GitOps support\n\nWe implemented the agent access rights feature to provide access from the GitLab frontend to the cluster in a secure and reliable way. To enable the feature, the user should update the agent’s configuration file by adding the [user_access](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html) section with the following parameters: `projects`, `groups`, and `access_as` to specify which projects can access cluster information via the agent and how it should authenticate.\n\nOnce this is done, the frontend can connect to the cluster by sending a request to the Rails controller, which should set a `gitlab_kas` cookie. This cookie is then added to the request sent to KAS together with the agent ID and Cross-Site Request Forgery (CSRF) token. Upon receiving the request, KAS checks the user’s authorization and forwards it to agentk, which makes an actual request to the Kubernetes API. Then the response goes all the way back from agentk to KAS and finally to the GitLab client.\n\n![Kubernetes overview - how it works](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099055229.png)\n\nTo integrate this logic on the GitLab frontend and use it within the Vue app, we developed a JavaScript library: [@gitlab/cluster-client](https://gitlab.com/gitlab-org/cluster-integration/javascript-client). It is generated from the Kubernetes OpenAPI specification using the typescript-fetch generator. It provides all the Kubernetes APIs in a way that can be used in a web browser.\n\n## Introducing the Watch API\n\nThe most challenging task is to provide **real-time updates** for the Kubernetes dashboard. Kubernetes introduces the concept of watches as an extension of GET requests, exposing the body contents as a [readable stream](https://developer.mozilla.org/en-US/docs/Web/API/Streams_API/Using_readable_streams). Once connected to the stream, the Kubernetes API pushes cluster state updates similarly to how the `kubectl get \u003Cresource> --watch` command works. The watch mechanism allows a client to fetch the current state of the resource (or resources list) and then subscribe to subsequent changes, without missing any events. Each event contains a type of modification (one of three types: added, modified, or deleted) and the affected object.\n\nWithin the `WatchApi` class of the `@gitlab/cluster-client` library, we've developed a systematic approach for interacting with the Kubernetes API. This involves fetching a continuous stream of data, processing it line by line, and managing events based on their types. Let's explore the key components and functionalities of this approach:\n\n1. Extending the Kubernetes API: Within the `WatchApi` class, we extend the base Kubernetes API functionality to fetch a continuous stream of data with a specified path and query parameters. This extension enables efficient handling of large datasets, as the stream is processed line by line.\n2. Decoding and event categorization: Upon receiving the stream, each line, typically representing a JSON object, is decoded. This process extracts relevant information and categorizes events based on their types.\n3. 
\n\nWithin the `WatchApi` class of the `@gitlab/cluster-client` library, we've developed a systematic approach for interacting with the Kubernetes API. This involves fetching a continuous stream of data, processing it line by line, and managing events based on their types. Let's explore the key components and functionalities of this approach:\n\n1. Extending the Kubernetes API: Within the `WatchApi` class, we extend the base Kubernetes API functionality to fetch a continuous stream of data with a specified path and query parameters. This extension enables efficient handling of large datasets, as the stream is processed line by line.\n2. Decoding and event categorization: Upon receiving the stream, each line, typically representing a JSON object, is decoded. This process extracts relevant information and categorizes events based on their types.\n3. Internal data management: The `WatchApi` class maintains an internal data array to represent the current state of the streamed data, updating it accordingly as new data arrives or changes occur.\n4. Event listener registration: The `WatchApi` class implements methods for registering event listeners, such as `onData`, `onError`, `onTimeout`, and `onTerminate`. These methods allow developers to customize their application's response to events like data updates, errors, and timeouts.\n\nThe code also handles scenarios such as invalid content types, timeouts, and errors from the server, emitting corresponding events for clients to handle appropriately. **With this straightforward, event-driven approach, the `WatchApi` class allows developers to create responsive real-time applications efficiently.**\n\n![Kubernetes overview - flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099055231.png)\n\n## How is the Kubernetes overview integrated with the GitLab frontend?\n\nCurrently, we have two Kubernetes integrations within the product: the Kubernetes overview section for the Environments and the full Kubernetes dashboard as a separate view. The latter is a major effort to represent all the available Kubernetes resources with filtering and sorting capabilities and a detailed view with the full information on the metadata, spec, and status of each resource. This initiative is now on hold while we search for the most useful ways of representing the Kubernetes resources related to an environment.\n\n[The Kubernetes overview](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html) on the Environments page is a detailed view of the Kubernetes resources related to a specific environment. To access the cluster state view, the user should select an agent installed in the cluster with the appropriate access rights, optionally provide a namespace, and select a related Flux resource.\n\nThe view renders a list of Kubernetes pods and services filtered by the namespace, showing their statuses as well as the Flux sync status. Clicking each resource opens a detailed view with more information for easy issue spotting and high-level debugging.\n\n![Kubernetes overview - list of Kubernetes pods and services](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099055233.png)\n\nWe need to set up a correct configuration object that will be used for all the API requests. In the configuration, we need to specify the URL provided by KAS, which proxies the Kubernetes APIs; the GitLab agent ID to connect with; and the CSRF token. We need to include cookies so that the `gitlab_kas` cookie gets picked up and sent within the request.\n\n```javascript\ncreateK8sAccessConfig({ kasTunnelUrl, gitlabAgentId }) {\n  return {\n    basePath: kasTunnelUrl,\n    headers: {\n      'GitLab-Agent-Id': gitlabAgentId,\n      ...csrf.headers,\n    },\n    credentials: 'include',\n  };\n}\n```\n\nAll the API requests are implemented as GraphQL client queries for efficiency, flexibility, and ease of development. The query structure enables clients to fetch data from various sources in one request. With clear schema definitions, GraphQL minimizes errors and enhances developer efficiency.\n\nWhen first rendering the Kubernetes overview, the frontend requests static lists of pods, services, and the Flux resource (either a HelmRelease or a Kustomization).
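\n\nAs a simplified sketch, such a client-side query could look like the following (the `k8sPods` field, the `LocalConfiguration` type, and the `apolloClient`/`renderPods` helpers are illustrative assumptions, not GitLab's exact production code):\n\n```javascript\nimport { gql } from '@apollo/client/core';\n\n// Illustrative client-side query: the field is resolved in the browser\n// against the cluster through the KAS proxy, not by the GitLab server.\nconst k8sPodsQuery = gql`\n  query getK8sPods($configuration: LocalConfiguration, $namespace: String) {\n    k8sPods(configuration: $configuration, namespace: $namespace) @client {\n      metadata {\n        name\n        namespace\n        creationTimestamp\n      }\n      status {\n        phase\n      }\n    }\n  }\n`;\n\napolloClient\n  .query({ query: k8sPodsQuery, variables: { configuration, namespace } })\n  .then(({ data }) => renderPods(data.k8sPods));\n```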
\n\nThe fetch request is needed to render the empty view correctly. If the frontend tried to subscribe to the Watch API stream and one of the resource lists was empty, we would wait for the updates forever and never show the actual result: 0 resources. In the case of pods and services, after the initial request, we subscribe to the stream even if an empty list was received, to reflect any cluster state changes. For the Flux resource, the chances that the resource would appear after the initial request are low. We use the empty response here as an opportunity to provide more information about the feature and its setup.\n\n![Kubernetes overview - flux sync status unavailable](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099055235.png)\n\nAfter rendering the initial result, the frontend makes additional requests to the Kubernetes API with the `?watch=true` query parameter in the URL. We create separate watchers for each event type: data, error, or timeout. When receiving the data, we follow three steps:\n\n- transform the data\n- update the Apollo cache\n- run a mutation to update the connection status\n\n```javascript\nwatcher.on(EVENT_DATA, (data) => {\n  result = data.map(mapWorkloadItem);\n  client.writeQuery({\n    query,\n    variables: { configuration, namespace },\n    data: { [queryField]: result },\n  });\n\n  updateConnectionStatus(client, {\n    configuration,\n    namespace,\n    resourceType: queryField,\n    status: connectionStatus.connected,\n  });\n});\n```\n\nAs we show the detailed information for each resource, we rely on having the status, spec, and metadata fields with the annotations and labels included. The Kubernetes API doesn’t always send this information, which could break the UI and throw errors from the GraphQL client. We transform the received data first to avoid these issues. We also add the `__typename` so that we can better define the data types and simplify the queries by reusing the shared fragments.\n\nOnce the data is transformed, we update the Apollo cache so that the frontend re-renders the views accordingly to reflect cluster state changes. Interestingly, we can visualize exactly what happens in the cluster: for example, when deleting pods, Kubernetes first creates the new ones in the pending state, and only then removes the old pods. Thus, for a moment we can see double the number of pods. We can also verify how the pods proceed from one state to another in real time. This is done with the combination of added, deleted, and modified events received from the Kubernetes APIs and processed in the `WatchApi` class of the `@gitlab/cluster-client` library.\n\n![Kubernetes overview - states of connection status](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099055236.gif)\n\nBy default, with a single Watch request, we get a stream of events for five minutes, and then it hits the timeout. We need to properly reflect this on the frontend so that the user is aware of any outdated information. To achieve this, we introduced a `k8sConnection` query together with a `reconnectToCluster` mutation. We have a UI element, a badge with a tooltip, to indicate the connection status. It has three states: connecting, connected, and disconnected. The state gets updated within every step of the UX flow. First, we set it to `connecting` once the Watch client gets created. Then we update it to `connected` with the first received piece of data. Last, we trigger the mutation for the `disconnected` state when an error or timeout event occurs.
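\n\nThe error and timeout watchers follow the same shape as the data watcher above; here is a sketch of the timeout handler (the `EVENT_TIMEOUT` constant is an assumption mirroring `EVENT_DATA`):\n\n```javascript\nwatcher.on(EVENT_TIMEOUT, () => {\n  // Mark the view as stale so the user can choose to reconnect.\n  updateConnectionStatus(client, {\n    configuration,\n    namespace,\n    resourceType: queryField,\n    status: connectionStatus.disconnected,\n  });\n});\n```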
\n\nThis way, we can let the user refresh the view and reconnect to the stream without needing to refresh the browser tab. Relying on a user action to reconnect to the stream helps us save resources and request only the necessary data, while ensuring an accurate cluster state is available to the user at any time.\n\n## What’s next?\n\nLeveraging the built-in Kubernetes functionality for watching a readable stream helped us build the feature quickly and provide the Kubernetes UI solution to our customers, getting early feedback and adjusting the product direction. This approach, however, presented technical challenges, such as the inability to utilize GraphQL subscriptions and the need to reconnect to the stream.\n\nWe are planning our next iterations to enhance the Kubernetes overview within the GitLab UI. One of the planned iterations for the feature, [Frontend-friendly Kubernetes Watch API](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/issues/541), is an updated mechanism for batch-watching the cluster data, moving from the fetch readable stream to WebSockets. We are going to create a new API in KAS to expose the Kubernetes watch capability via WebSocket. This should reduce the complexity of the JavaScript code, resolve the timeout issue, and improve the compatibility of the Kubernetes APIs within GitLab frontend integrations.\n\n> Curious to learn more or want to try out this functionality? Visit our [Kubernetes Dashboard documentation](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html) for more details and configuration tips.\n",[1002,749,726],{"slug":1065,"featured":92,"template":678},"kubernetes-overview-operate-cluster-data-on-the-frontend","content:en-us:blog:kubernetes-overview-operate-cluster-data-on-the-frontend.yml","Kubernetes Overview Operate Cluster Data On The Frontend","en-us/blog/kubernetes-overview-operate-cluster-data-on-the-frontend.yml","en-us/blog/kubernetes-overview-operate-cluster-data-on-the-frontend",{"_path":1071,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1072,"content":1078,"config":1085,"_id":1087,"_type":16,"title":1088,"_source":17,"_file":1089,"_stem":1090,"_extension":20},"/en-us/blog/debug-web-apps-quickly-within-gitlab",{"title":1073,"description":1074,"ogTitle":1073,"ogDescription":1074,"noIndex":6,"ogImage":1075,"ogUrl":1076,"ogSiteName":692,"ogType":693,"canonicalUrls":1076,"schema":1077},"Debug Web apps quickly within GitLab","Jam for GitLab, a browser extension, creates GitLab issues with critical context such as browser info, console/network logs, and reproduction steps - in one click.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099168/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2810%29_arHGAEPyRHF7euCvaxE0S_1750099168482.png","https://about.gitlab.com/blog/debug-web-apps-quickly-within-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Debug Web apps quickly within GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivanha Paz\"}],\n        \"datePublished\": \"2024-05-08\",\n      }",{"title":1073,"description":1074,"authors":1079,"heroImage":1075,"date":1081,"body":1082,"category":14,"tags":1083},[1080],"Ivanha Paz","2024-05-08","***Editor's note: From time to time, we 
invite members of the community to contribute to the GitLab Blog. Thanks to [Jam.dev](https://jam.dev/gitlab) for co-creating with us.***\n\nDebugging a Web app takes a village, but gathering information about bugs as they happen can be challenging. Jam.dev launched the Jam for GitLab browser extension (available for Google Chrome, Arc, Opera, and Edge) that enables all DevSecOps team members to create comprehensive debugging reports, complete with instant replays of the bug, with a single click.\n\nThe reports, which are spun up as GitLab issues, include the context engineers need to find and fix bugs, including internet speed, browser information, console/network logs, and reproduction steps. Jam also parses GraphQL requests for errors, which can be copied as cURL.\n\nJam for GitLab was built using GitLab's API, so it is fully integrated with the DevSecOps platform. Here’s how Jam for GitLab works:\n\n1. Click on the Jam browser extension to record your screen and take a screenshot or replay a bug that just happened.\n\n![Create issue - gif 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099178/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099177866.gif)\n\n2. Jam automatically generates a GitLab issue with all the technical debugging context.\n\n![Info collected - gif 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099178/Blog/Content%20Images/Blog/Content%20Images/Jam_for_GitLab_debugging_aHR0cHM6_1750099177867.gif)\n\n## Why we created Jam for GitLab\n\nLike so many of you, we are huge fans of GitLab. A lot of Jam users love and engage with the GitLab developer community. It’s one of Jam’s top three most requested integrations! And with its API, GitLab makes it easy for startups like us to build new tools in the GitLab ecosystem. Building for the GitLab community is an important milestone for the Jam team.\n\nJust like GitLab values efficiency, we want to make developers’ lives easier. We believe the best way to do it is by removing unnecessary barriers to collaboration between engineering and product. We share this vision with GitLab and all of you using it to improve the lives of your customers and, quite literally, build the future.\n\nAs my teammate and Jam engineer, Arég, says, “The worst part of the job is trying to debug an existing system to understand why it’s not behaving the way people expect. But you can use Jam, and maybe it’ll be less terrible, and you’ll have more time to do the best part: building something new.”\n\nGitLab is where product teams come together to build what’s next. Not just engineers, but everyone involved in the software development lifecycle. It takes a powerful platform to enable this level of collaboration. We love what GitLab stands for, and it’s truly an honor to contribute to our shared mission by making debugging easier and faster.\n\nFrom all of us at Jam, thank you to the GitLab team for being such awesome people to work with and for building a product we and millions around the world love.\n\n> Ready to dramatically cut your debugging time? 
[Get started with Jam today.](https://jam.dev/gitlab)\n\n*Paz is DevRel lead at Jam.*\n",[232,1084,725],"code review",{"slug":1086,"featured":6,"template":678},"debug-web-apps-quickly-within-gitlab","content:en-us:blog:debug-web-apps-quickly-within-gitlab.yml","Debug Web Apps Quickly Within Gitlab","en-us/blog/debug-web-apps-quickly-within-gitlab.yml","en-us/blog/debug-web-apps-quickly-within-gitlab",{"_path":1092,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1093,"content":1099,"config":1105,"_id":1107,"_type":16,"title":1108,"_source":17,"_file":1109,"_stem":1110,"_extension":20},"/en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access",{"title":1094,"description":1095,"ogTitle":1094,"ogDescription":1095,"noIndex":6,"ogImage":1096,"ogUrl":1097,"ogSiteName":692,"ogType":693,"canonicalUrls":1097,"schema":1098},"Tutorial: Install VS Code on a cloud provider VM and set up remote access","Learn how to automate the installation of VS Code on a VM running on a cloud provider and how to access it from your local laptop.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","https://about.gitlab.com/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Install VS Code on a cloud provider VM and set up remote access\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-05-06\",\n      }",{"title":1094,"description":1095,"authors":1100,"heroImage":1096,"date":1102,"body":1103,"category":14,"tags":1104},[1101],"Cesar Saavedra","2024-05-06","DevSecOps teams can sometimes find they need to run an instance of Visual Studio Code (VS Code) remotely for team members to share when they don't have enough local resources. However, installing, running, and using VS Code on a remote virtual machine (VM) via a cloud provider can be a complex process full of pitfalls and false starts. This tutorial covers how to automate the installation of VS Code on a VM running on a cloud provider.\n\nThis approach involves two separate GitLab projects, each with its own pipeline. The first one uses Terraform to instantiate a virtual machine running Debian Linux in GCP. The second one installs VS Code on the newly instantiated VM. Lastly, we provide a procedure on how to set up your local Mac laptop to connect to and use the VS Code instance installed on the remote VM.\n\n## Create a Debian Linux distribution VM on GCP\n\nHere are the steps to create a Debian Linux distribution VM on GCP.\n\n### Prerequisites\n\n1. A GCP account. If you don't have one, please [create one](https://cloud.google.com/free?hl=en).\n2. A GitLab account on [gitlab.com](https://gitlab.com/users/sign_in).\n\n**Note:** This installation uses:\n\n- Debian 5.10.205-2 (2023-12-31) x86_64 GNU/Linux, a.k.a. Debian 11\n\n### Create a service account and download its key\n\nBefore you create the first GitLab project, you need to create a service account in GCP and then generate and download a key. You will need this key so that your GitLab pipelines can communicate with GCP and the GitLab API.\n\n
1. To authenticate GCP with GitLab, sign in to your GCP account and create a [GCP service account](https://cloud.google.com/docs/authentication#service-accounts) with the following roles:\n- `Compute Network Admin`\n- `Compute Admin`\n- `Service Account User`\n- `Service Account Admin`\n- `Security Admin`\n\n2. Download the JSON file with the service account key you created in the previous step.\n3. On your computer, encode the JSON file to `base64` (replace `/path/to/sa-key.json` with the path where your key is located):\n\n   ```shell\n   base64 -i /path/to/sa-key.json | tr -d \\\\n\n   ```\n\n**NOTE:** Save the output of this command. You will use it later as the value for the `BASE64_GOOGLE_CREDENTIALS` environment variable.\n\n### Configure your GitLab project\n\nNext, you need to create and configure the first GitLab project.\n\n1. Create a group in your GitLab workspace and name it `gcpvmlinuxvscode`.\n\n1. Inside your newly created group, clone the following project:\n\n   ```shell\n   git@gitlab.com:tech-marketing/sandbox/gcpvmlinuxvscode/gcpvmlnxsetup.git\n   ```\n\n1. Drill into your newly cloned project, `gcpvmlnxsetup`, and set up the following CI/CD variables to configure it:\n   1. On the left sidebar, select **Settings > CI/CD**.\n   1. Expand **Variables**.\n   1. Set the variable `BASE64_GOOGLE_CREDENTIALS` to the `base64` encoded JSON file you created in the previous section.\n   1. Set the variable `TF_VAR_gcp_project` to your GCP `project` ID.\n   1. Set the variable `TF_VAR_gcp_region` to your GCP `region` ID, e.g. us-east1, which is also its default value.\n   1. Set the variable `TF_VAR_gcp_zone` to your GCP `zone` ID, e.g. us-east1-d, which is also its default value.\n   1. Set the variable `TF_VAR_machine_type` to the GCP `machine type` ID, e.g. e2-standard-2, which is also its default value.\n   1. Set the variable `TF_VAR_gcp_vmname` to the GCP `vm name` you want to give the VM, e.g. my-test-vm, which is also its default value.\n\n**Note:** We have followed a minimalist approach to set up this VM. If you would like to customize the VM further, please refer to the [Google Terraform provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference) and the [Google Compute Instance Terraform provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance) documentation for additional resource options.\n\n### Provision your VM\n\nAfter configuring your project, manually trigger the provisioning of your VM as follows:\n\n1. On the left sidebar, go to **Build > Pipelines**.\n1. Next to **Play** (**{play}**), select the dropdown list icon (**{chevron-lg-down}**).\n1. Select **Deploy** to manually trigger the deployment job.\n\nWhen the pipeline finishes successfully, you can see your new VM on GCP:\n\n- Check it on your [GCP console's VM instances list](https://console.cloud.google.com/compute/instances).\n\n### Remove the VM\n\n**Important note:** Only run the cleanup job when you no longer need the GCP VM and/or the VS Code that you installed in it.\n\nA manual cleanup job is included in your pipeline by default. To remove all created resources:\n\n1. On the left sidebar, select **Build > Pipelines** and select the most recent pipeline.\n1. For the `destroy` job, select **Play** (**{play}**).
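\n\nFor orientation, such pipelines typically follow a manual deploy/destroy pattern along these lines (an illustrative sketch only; the cloned project ships its own `.gitlab-ci.yml` and job definitions):\n\n```yaml\n# Illustrative sketch, not the project's exact pipeline\nstages: [deploy, cleanup]\n\ndeploy:\n  stage: deploy\n  when: manual\n  script:\n    - terraform init\n    - terraform apply -auto-approve\n\ndestroy:\n  stage: cleanup\n  when: manual\n  script:\n    - terraform init\n    - terraform destroy -auto-approve\n```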
\n\n## Install and set up VS Code on a GCP VM\n\nPerform the steps in this section only after you have successfully finished the previous sections above. In this section, you will create the second GitLab project, which will install VS Code and its dependencies on the running VM on GCP.\n\n### Prerequisites\n\n1. A provisioned GCP VM. We covered this in the previous sections.\n\n**Note:** This installation uses:\n\n- VS Code Version 1.85.2\n\n### Configure your project\n\n**Note:** Since you will be using the `ssh` command multiple times on your laptop, we strongly suggest that you make a backup copy of your laptop's local `$HOME/.ssh` directory before continuing.\n\nNext, you need to create and configure the second GitLab project.\n\n1. Head over to your GitLab group `gcpvmlinuxvscode`, which you created at the beginning of this post.\n\n1. Inside the group `gcpvmlinuxvscode`, clone the following project:\n\n   ```shell\n   git@gitlab.com:tech-marketing/sandbox/gcpvmlinuxvscode/vscvmsetup.git\n   ```\n\n1. Drill into your newly cloned project, `vscvmsetup`, and set up the following CI/CD variables to configure it:\n   1. On the left sidebar, select **Settings > CI/CD**.\n   1. Expand **Variables**.\n   1. Set the variable `BASE64_GOOGLE_CREDENTIALS` to the `base64` encoded JSON file you created in project `gcpvmlnxsetup`. You can copy this value from the variable with the same name in project `gcpvmlnxsetup`.\n   1. Set the variable `gcp_project` to your GCP `project` ID.\n   1. Set the variable `gcp_vmname` to the name you gave the VM in project `gcpvmlnxsetup`, e.g. my-test-vm.\n   1. Set the variable `gcp_zone` to your GCP `zone` ID, e.g. us-east1-d.\n   1. Set the variable `vm_pwd` to the password that you will use to ssh to the VM.\n   1. Set the variable `gcp_vm_username` to the first portion (before the \"@\" sign) of the email associated with your GCP account, which should be your GitLab email.\n\n### Run the project pipeline\n\nAfter configuring the second GitLab project, manually trigger the provisioning of VS Code and its dependencies to the GCP VM as follows:\n\n1. On the left sidebar, select **Build > Pipelines** and click on the button **Run pipeline**. On the next screen, click on the button **Run pipeline**.\n\n    The pipeline will:\n\n    - install `xauth` on the virtual machine. This is needed for effective X11 communication between your local desktop and the VM.\n    - install `git` on the VM.\n    - install `Visual Studio Code` on the VM.\n\n2. At this point, you can wait until the pipeline successfully completes. If you don't want to wait, you can continue to do the first step of the next section. However, you must ensure the pipeline has successfully completed before you can perform Step 2 of the next section.\n\n### Connect to your VM from your local Mac laptop\n\nNow that you have an instance of VS Code running on a Linux VM on GCP, you need to configure your Mac laptop to act as a client to the remote VM. Follow these steps:\n\n1. To connect to the remote VS Code from your Mac, you must first install `XQuartz` on your Mac. You can execute the following command on your Mac to install it:\n\n```\nbrew install xquartz\n```\nOr, you can follow the instructions from the following [tutorial](https://und.edu/research/computational-research-center/tutorials/mac-x11.html) from the University of North Dakota.\n\nAfter the pipeline for project `vscvmsetup` successfully executes to completion (the pipeline you manually executed in the previous section), you can connect to the remote VS Code as follows:\n\n2. Launch `XQuartz` on your Mac (it should be located in your Applications folder). Launching it should open an `xterm` on your Mac. 
If it does not, then you can select **Applications > Terminal** from the `XQuartz` top menu.\n3. On the `xterm`, enter the following command:\n\n```\ngcloud compute ssh --zone \"[GCP zone]\" \"[name of your VM]\" --project \"[GCP project]\" --ssh-flag=\"-Y\"\n```\nWhere:\n\n- `[name of your VM]` is the name of the VM you created in project `gcpvmlnxsetup`. Its value should be the same as the `gcp_vmname` variable.\n- `[GCP zone]` is the zone where the VM is running. Its value should be the same as the `gcp_zone` variable.\n- `[GCP project]` is your GCP project's ID. Its value should be the same as the `gcp_project` variable.\n\n***Note: If you have not installed the Google Cloud CLI, please do so by following the [Google documentation](https://cloud.google.com/sdk/docs/install).***\n\n4. If you have not used SSH on your Mac before, you may not have a `.ssh` directory in your `HOME` directory. If this is the case, you will be asked if you would like to continue with the creation of this directory. Answer **Y**.\n\n5. Next, you will be asked to enter the same password twice to generate a public/private key. Enter the same password you used when defining the variable `vm_pwd` in the required configuration above.\n\n6. Once the SSH key is done propagating, you will need to enter the password again two times to log in to the VM.\n\n7. You should now be logged in to the VM.
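\n\nOptionally, before launching anything graphical, you can confirm that X11 forwarding is active (assuming the `--ssh-flag=\"-Y\"` option took effect):\n\n```\necho $DISPLAY\n```\nIf forwarding is working, this prints a value such as `localhost:10.0` (the exact value may differ); an empty result means the X11 tunnel is not set up.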
\n\n### Create a personal access token\n\nThe assumption here is that you already have a GitLab project that you would like to open and work on in the remote VS Code. To do this, you will need to clone your GitLab project from the VM, using a personal access token (PAT) to authenticate.\n\n1. Head over to your GitLab project (the one that you'd like to open from the remote VS Code).\n2. From your GitLab project, create a [PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token), name it `pat-gcpvm`, and ensure that it has the following scopes: `read_repository`, `write_repository`, `read_registry`, `write_registry`, and `ai_features`.\n3. Save the generated PAT somewhere safe; you will need it later.\n\n### Clone the repository\n\n1. On your local Mac, from the `xterm` where you are logged on to the remote VM, enter the following command:\n\n```\ngit clone https://[your GitLab username]:[personal_access_token]@gitlab.com/[GitLab project name].git\n```\n\nWhere:\n\n- `[your GitLab username]` is your GitLab handle.\n- `[personal_access_token]` is the PAT you created in the previous section.\n- `[GitLab project name]` is the full path of the GitLab project you would like to clone.\n\n## Launch Visual Studio Code\n\n1. From the `xterm` where you are logged in to the VM, enter the following command:\n\n```\ncode\n```\n\nWait for a few seconds and Visual Studio Code will appear on your Mac screen.\n\n2. From the VS Code menu, select **File > Open Folder...**.\n3. In the file chooser, select the top-level directory of the GitLab project you cloned in the previous section.\n\nThat's it! You're ready to start working on your cloned GitLab project using the VS Code that you installed on a remote Linux-based VM.\n\n### Troubleshooting\n\nWhile using the remotely installed VS Code from your local Mac, you may encounter a few issues. In this section, we provide guidance on how to mitigate them.\n\n#### Keyboard keys not mapped correctly\n\nIf, while running VS Code, you are having issues with your keyboard keys not being mapped correctly, e.g. letter e is backspace, letter r is tab, letter s is clear line, etc., do the following:\n\n1. In VS Code, select **File > Preferences > Settings**.\n1. Search for \"keyboard\". If you are having issues with the letter e, then search for \"board\". Click on the \"Keyboard\" entry under \"Application.\"\n1. Ensure that Keyboard Dispatch is set to \"keyCode.\"\n1. Restart VS Code.\n1. If you need further help, this is a good resource for [keyboard problems](https://github.com/microsoft/vscode/wiki/Keybinding-Issues#troubleshoot-linux-keybindings).\n\n#### Error loading webview: Error\n\nIf, while running VS Code, you get a message saying:\n\n\"Error loading webview: Error: Could not register service worker: InvalidStateError: Failed to register a ServiceWorker: The document is in an invalid state.\"\n\n1. Exit VS Code and then enter this command from the `xterm` window:\n\n`killall code`\n\nYou may need to execute this command two or three times in a row to kill all VS Code processes.\n\n2. Ensure that all VS Code-related processes are gone by entering the following command from the `xterm` window:\n\n`ps -ef | grep code`\n\n3. Once all the VS Code-related processes are gone, restart VS Code by entering the following command from the `xterm` window:\n\n`code`\n\n#### Some useful commands to debug SSH\n\nHere are some useful commands to run on the VM that can help you debug SSH issues:\n\n1. To get the status, location, and latest events of sshd:\n\n`sudo systemctl status ssh`\n\n2. To see the log of sshd:\n\n`journalctl -b -a -u ssh`\n\n3. To restart the SSH daemon:\n\n`sudo systemctl restart ssh.service`\n\nOr\n\n`sudo systemctl restart ssh`\n\n4. To start a root shell:\n\n`sudo -s`\n\n## Get started\n\nThis article described how to:\n- instantiate a Linux-based VM on GCP\n- install VS Code and dependencies on the remote VM\n- clone an existing GitLab project of yours in the remote VM\n- open your remotely cloned project from the remotely installed VS Code\n\nAs a result, you can use your laptop as a thin client that accesses a remote server, where all the work takes place.\n\n> The automation to get all these parts in place was done by GitLab. 
Sign up for a [free 30-day GitLab Ultimate trial](https://about.gitlab.com/free-trial/) to get started today!",[873,726,703],{"slug":1106,"featured":92,"template":678},"tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access","content:en-us:blog:tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access.yml","Tutorial Install Vs Code On A Cloud Provider Vm And Set Up Remote Access","en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access.yml","en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access",{"_path":1112,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1113,"content":1119,"config":1124,"_id":1126,"_type":16,"title":1127,"_source":17,"_file":1128,"_stem":1129,"_extension":20},"/en-us/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component",{"title":1114,"description":1115,"ogTitle":1114,"ogDescription":1115,"noIndex":6,"ogImage":1116,"ogUrl":1117,"ogSiteName":692,"ogType":693,"canonicalUrls":1117,"schema":1118},"Refactoring a CI/CD template to a CI/CD component","CI/CD components are the next generation of CI/CD templates, enhancing pipeline creation and maintenance. Learn how to transition from templates to components.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665989/Blog/Hero%20Images/AdobeStock_618473457.jpg","https://about.gitlab.com/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Refactoring a CI/CD template to a CI/CD component\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-03-04\",\n      }",{"title":1114,"description":1115,"authors":1120,"heroImage":1116,"date":1121,"body":1122,"category":14,"tags":1123},[1019],"2024-03-04","GitLab recently introduced [CI/CD components](https://docs.gitlab.com/ee/ci/components/) as the next generation of the traditional CI/CD templates and a novel approach to constructing CI/CD pipelines. CI/CD components offer reusable pipeline configurations that can be customized using input parameters.\n\nAlthough GitLab continues to support templates, they come with certain drawbacks that are addressed by the introduction of components. Therefore, we highly recommend refactoring existing templates into CI/CD components.\n\nThis article will guide you through the steps of converting your current GitLab CI/CD templates into reusable CI/CD components. Prior familiarity with how to create CI/CD components is a prerequisite, which you can learn about in this blog post: [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/).\n\n## How to convert a template to a component\n\nThese are the steps to convert a CI/CD template to a CI/CD component:\n1. Create a component project if you don’t have one.\n2. Copy your existing templates to the ‘templates’ directory in the component project.\n3. For each template, review the jobs listed in it and assess whether you prefer to distribute them across different components or retain some or all within the same component. While it's possible to include multiple jobs in a single component, it's advisable to create components that perform minimal tasks. This approach enhances ease of reuse and flexibility.\n4. Create a new section at the top of the configuration for the input parameters and metadata using the `spec` keyword.\n5. 
Replace any custom CI/CD variables and any other hard-coded values with [inputs](https://about.gitlab.com/blog/use-inputs-in-includable-files/) to maximize flexibility for consumption. Consider parameterizing elements such as stage, image, job name/job prefix, etc. (see the sketch after this list).\n6. Follow the [best practices](https://docs.gitlab.com/ee/ci/components/index.html#best-practices) for components.\n7. Improve the configuration, for example by enabling [merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) or making it [more efficient](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html).
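\n\nTo make steps 4 and 5 concrete, here is a minimal sketch of the shape a converted component template takes (the job name, inputs, and script are illustrative):\n\n```yaml\n# templates/deploy.yml in the component project\nspec:\n  inputs:\n    stage:\n      default: deploy\n    environment:\n      default: production\n---\ndeploy-app:\n  stage: $[[ inputs.stage ]]\n  script:\n    - ./deploy.sh \"$[[ inputs.environment ]]\"\n```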
\n\nHere is a code example of a job in an existing template:\n\n![existing template](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678076/Blog/Content%20Images/Screenshot_2024-03-03_at_12.05.25.png)\n\nAnd this is the refactored [component code](https://gitlab.com/components/aws/-/blob/main/templates/ec2-deploy-production.yml?ref_type=heads):\n\n![Converted component](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678077/Blog/Content%20Images/Screenshot_2024-03-03_at_12.07.14.png)\n\nOnce your components are ready, you can publish them to the CI/CD catalog so others will be able to find and consume them.\n\n## Take a product tour\n\nWe've prepared a brief product tour so you can quickly dive into the CI/CD catalog and see it in action (use the \"Next\" button to progress through the demo).\n\n[![Product tour of CI/CD catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678076/Blog/Content%20Images/Screenshot_2024-03-03_at_12.18.09.png)](https://gitlab.navattic.com/cicd-catalog)\n\n## Additional resources\n\nPlease refer to the official documentation on CI/CD components for more on how to [convert templates to components](https://docs.gitlab.com/ee/ci/components/#convert-a-cicd-template-to-a-component).\n\nYou can explore [an additional practical example](https://docs.gitlab.com/ee/ci/components/examples.html#cicd-component-migration-examples), demonstrating the steps to convert GitLab Go templates to CI/CD components.\n\nThen, you can watch the following video where [Fabio Pitino](https://about.gitlab.com/company/team/#fabiopitino), GitLab Principal Engineer, demonstrates the process of refactoring GitLab AWS templates to CI/CD components.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/dGCPrIAuBmE?si=1vjG_aEziY5jn-YC\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[110,725,726],{"slug":1125,"featured":92,"template":678},"refactoring-a-ci-cd-template-to-a-ci-cd-component","content:en-us:blog:refactoring-a-ci-cd-template-to-a-ci-cd-component.yml","Refactoring A Ci Cd Template To A Ci Cd Component","en-us/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component.yml","en-us/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component",{"_path":1131,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1132,"content":1138,"config":1145,"_id":1147,"_type":16,"title":1148,"_source":17,"_file":1149,"_stem":1150,"_extension":20},"/en-us/blog/revisiting-the-variables-management-workflow",{"title":1133,"description":1134,"ogTitle":1133,"ogDescription":1134,"noIndex":6,"ogImage":1135,"ogUrl":1136,"ogSiteName":692,"ogType":693,"canonicalUrls":1136,"schema":1137},"Revisiting the variables management workflow","Our users helped us identify the hurdles in the variables management experience and we used those insights to guide improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098484/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_618473457_nd5Dr8kfGdrlTWLOPmDjb_1750098483284.jpg","https://about.gitlab.com/blog/revisiting-the-variables-management-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Revisiting the variables management workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2024-02-26\",\n      }",{"title":1133,"description":1134,"authors":1139,"heroImage":1135,"date":1141,"body":1142,"category":14,"tags":1143},[1140],"Veethika Mishra","2024-02-26","CI/CD variables play a vital role in building and maintaining CI/CD pipelines and platforms. They are an essential part of the majority of developers’ workflows, serving a range of purposes from storing reusable information to maintaining data integrity. Given their significance, we made enhancing workflows related to CI/CD variables a priority.\n\nRecently, we conducted interviews with users representing different [personas](https://handbook.gitlab.com/handbook/product/personas/#list-of-user-personas) related to software development, working in teams with different structural and cultural dynamics. Our aim was to gain insights into the challenges they encounter when using and managing CI/CD variables within GitLab. The feedback helped us gain valuable perspective, guiding us toward [necessary improvements](https://gitlab.com/gitlab-org/gitlab/-/issues/418331) in these workflows. Some of the notable changes are highlighted in this blog.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/gdL2cEp3kw0?si=aNmhofDU3DsnofiP\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Better management\n\n![variables management - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098505/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098504762.png)\n\nEffective decision-making regarding the addition, modification, or removal of CI/CD variables hinges on understanding their purpose within a project or group. Lacking visibility into a variable's purpose can complicate these decisions. To address this challenge, we've introduced an enhancement to the variable creation process that will allow users to provide a description detailing the usage and context of a variable, reducing reliance on memory. This description will be displayed in the list, along with the other attributes of the variable.\n\n## Seamless task continuity\n\n![variables management - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098505/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098504763.png)\n\nEfficiency is paramount in software development, as it frees developers to focus on the qualitative aspects of their work. We have changed the variable creation workflow to facilitate consecutive addition or editing of multiple variables to boost efficiency. 
Improved, clear notifications and contextual error messages ensure users can perform tasks without needing to repeatedly open separate forms.\n\n## Enhanced error prevention\n\n![variables management - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098505/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098504764.png)\n\nHow error messages are presented and made accessible in a workflow determines their effectiveness in error resolution. We revisited the different error states users are likely to encounter during the variable creation and editing workflows and identified improvement opportunities, ranging from adding new validations and help texts to enhancing existing error-handling states.\n\n## Share your feedback\n\nWe believe in taking an iterative approach to improving the product. We used insights from the recent user research and our best judgment when deciding on the changes, but there’s always room for improvement. Your feedback from using the changed UI in your everyday work will help us understand what’s working and what isn’t, and, therefore, decide on future iterations. Please head to our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/441177) to share your thoughts and suggestions on the changes made.\n\n## What’s next?\n\nAs we work on making the existing variables workflow more usable, we’re also making progress on the [GitLab Secret Manager](https://about.gitlab.com/direction/govern/pipeline_security/secrets_management/#overview) to provide users with a more secure method for enabling GitLab, or a component built within GitLab, to connect to other systems.\n\nThere’s an ongoing effort to [improve the variables table layout to clearly represent the visual hierarchy](https://gitlab.com/gitlab-org/gitlab/-/issues/403176) between group and project variables and to enhance the [audit history for CI variables](https://gitlab.com/gitlab-org/gitlab/-/issues/416148) to provide better visibility into activities related to variables.\n\n## Read more about our UI improvements\n\n- [How we overhauled GitLab navigation](https://about.gitlab.com/blog/navigation-research-blog-post/)\n- [Beautifying our UI: Giving GitLab build features a fresh look](https://about.gitlab.com/blog/beautifying-of-our-ui/)\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[110,1144,725,894],"UX",{"slug":1146,"featured":92,"template":678},"revisiting-the-variables-management-workflow","content:en-us:blog:revisiting-the-variables-management-workflow.yml","Revisiting The Variables Management Workflow","en-us/blog/revisiting-the-variables-management-workflow.yml","en-us/blog/revisiting-the-variables-management-workflow",{"_path":1152,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1153,"content":1159,"config":1165,"_id":1167,"_type":16,"title":1168,"_source":17,"_file":1169,"_stem":1170,"_extension":20},"/en-us/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags",{"title":1154,"description":1155,"ogTitle":1154,"ogDescription":1155,"noIndex":6,"ogImage":1156,"ogUrl":1157,"ogSiteName":692,"ogType":693,"canonicalUrls":1157,"schema":1158},"How to translate Bamboo agent capabilities to GitLab Runner tags","This tutorial demonstrates how to use tags to organize GitLab Runners when building complex CI/CD pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663019/Blog/Hero%20Images/AdobeStock_519147119.jpg","https://about.gitlab.com/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to translate Bamboo agent capabilities to GitLab Runner tags\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-02-22\",\n      }",{"title":1154,"description":1155,"authors":1160,"heroImage":1156,"date":1162,"body":1163,"category":14,"tags":1164},[1161],"Abubakar Siddiq Ango","2024-02-22","CI pipelines often start simple – a single job building a binary and pushing it to an artifact repository or to some production environment. Ever-changing software requirements introduce more complexities, such as adding more jobs to perform certain checks and reviewing the output before the final build job is executed.\n\nThese complexities increase exponentially when builds are expected to target varying systems with different architectures or resource needs. This is evident in projects like operating systems, mobile apps, or software distributions that support multiple deployment platforms. To account for the varying needs of builds in these types of environments, having multiple runners that match needed requirements is key, and that's where [GitLab Runner](https://docs.gitlab.com/runner/) tags come in. If you are coming from Atlassian's Bamboo, they are called \"agent capabilities.\"\n\nRunner tags allow organizing runners by a tag that signifies a specific use case they support; these tags are then used to make sure CI jobs run on a runner that meets their requirements. For example, a job can require GPU resources that are only available on a handful of runners; tagging the job with that runner's tag allows it to be scheduled on a runner with GPUs.\n\nAgent capabilities on Bamboo are used to achieve the same functionality by specifying binaries or custom identifiers that must be matched or available for a job to run on a Bamboo agent. In this blog post, we will be looking at how to translate Bamboo agent capabilities to GitLab Runner tags. 
\n\nBamboo has varying agent capabilities:\n\n- Executable capability specifies executables that are available on an agent.\n- JDK capability specifies that the Java Development Kit is installed and available for builds.\n- Version Control capability lets Bamboo know the version control systems set up on an agent and where the client application is located.\n- Docker capability is used to define the agents where Docker is installed for Docker tasks.\n- Custom capability uses key/value identifiers to identify a unique functionality an agent provides.\n\nGitLab makes the process easier by using tags to identify runners, some of which can be assigned multiple tags to denote the varying functionalities they can provide to jobs. Let's look at how you can use runner tags in GitLab.\n\n## Adding tags to GitLab Runner\n\nWhen [registering a runner](https://docs.gitlab.com/runner/register/index.html) after installation, one of the steps requires providing a list of comma-separated tags that can be used. If none are provided at this stage, you can always edit the `/etc/gitlab-runner/config.toml` file and add any missing tags.\n\nYou can also manage the tags of a runner in GitLab by accessing the runner's edit page and updating the `Tags` field. You have the option for the runner to be exclusive to jobs that are tagged appropriately, or, when there are no tagged jobs to run, to run untagged jobs, too. Checking `Run untagged jobs` enables this behavior.\n\n## Using tags in the .gitlab-ci.yml file\n\nTo run a job on a specific runner, add the relevant tags to the job's configuration, as shown below:\n\n```yaml\nbuild_ios:\n  image: macos-13-xcode-14\n  stage: build\n  script:\n    - bundle check --path vendor/bundle || bundle install --path vendor/bundle --jobs $(nproc)\n    - bundle exec fastlane build\n  tags:\n    - saas-macos-medium-m1\n```\n\nIn the example above, the job builds an iOS application only on runners operating on a macOS device with an M1 chip and tagged `saas-macos-medium-m1`.\n\n## Using multiple tags\n\nA job can specify multiple tags to target a diverse range of runners, especially in organizations that run several fleets of runners as part of their software development lifecycle. A job will only run if a runner is found that has all the tags the job has been tagged with. For example, if a job has `[linux, android, fastlane]` tags, a runner with `[android, fastlane]` or `[linux, android]` will not execute the job because the full set of tags does not match the runner.
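\n\nFor example, a job that requires a runner carrying all three of those tags would look like this (job name and script are illustrative):\n\n```yaml\nbuild_android:\n  stage: build\n  script:\n    - bundle exec fastlane build\n  tags:\n    - linux\n    - android\n    - fastlane\n```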
\n\n## Dynamic jobs with tags and variables\n\nYou can use variables to determine the values of tags and thus dynamically influence which runners pick up the jobs. For example:\n\n```yaml\nvariables:\n  KUBERNETES_RUNNER: kubernetes\n\njob:\n  tags:\n    - docker\n    - $KUBERNETES_RUNNER\n  script:\n    - echo \"Hello runner selector feature\"\n```\n\nIn this example, only runners tagged with both `docker` and `kubernetes` will execute the job. You can take this further in more complex pipelines with [`parallel: matrix`](https://docs.gitlab.com/ee/ci/yaml/index.html#parallelmatrix). Here is an example:\n\n```yaml\ndeploystacks:\n  stage: deploy\n  parallel:\n    matrix:\n      - PROVIDER: aws\n        STACK: [monitoring, app1]\n      - PROVIDER: gcp\n        STACK: [data]\n  tags:\n    - ${PROVIDER}-${STACK}\n  environment: $PROVIDER/$STACK\n```\n\nThis example ends up with three parallel jobs, each with a different tag: `aws-monitoring`, `aws-app1`, and `gcp-data`, thus possibly targeting three different runners.\n\nUsing tags in your GitLab CI configuration gives you the flexibility to determine where and how your applications are built, to use resources more efficiently as scarce resources can be limited to certain runners, and to determine how jobs are allocated to those runners.\n\n> Learn more about [how to make the move from Atlassian to GitLab](https://about.gitlab.com/move-to-gitlab-from-atlassian/).\n",[110,832,726],{"slug":1166,"featured":92,"template":678},"how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags","content:en-us:blog:how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags.yml","How To Translate Bamboo Agent Capabilities To Gitlab Runner Tags","en-us/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags.yml","en-us/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags",{"_path":1172,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1173,"content":1179,"config":1185,"_id":1187,"_type":16,"title":1188,"_source":17,"_file":1189,"_stem":1190,"_extension":20},"/en-us/blog/compose-readers-and-writers-in-golang-applications",{"title":1174,"description":1175,"ogTitle":1174,"ogDescription":1175,"noIndex":6,"ogImage":1176,"ogUrl":1177,"ogSiteName":692,"ogType":693,"canonicalUrls":1177,"schema":1178},"Compose Readers and Writers in Golang applications","GitLab streams terabytes of Git data every hour using Golang abstractions of I/O implementations. Learn how to compose Readers and Writers in Golang apps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099464/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_639935439_3oqldo5Yt5wPonEJYZOLTM_1750099464124.jpg","https://about.gitlab.com/blog/compose-readers-and-writers-in-golang-applications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Compose Readers and Writers in Golang applications\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Igor Drozdov\"}],\n        \"datePublished\": \"2024-02-15\",\n      }",{"title":1174,"description":1175,"authors":1180,"heroImage":1176,"date":1182,"body":1183,"category":14,"tags":1184},[1181],"Igor Drozdov","2024-02-15","Every hour, GitLab transfers terabytes of Git data between a server and a client. It is hard or even impossible to handle this amount of traffic unless it is done efficiently in a streaming fashion. Git data is served by Gitaly (Git server), GitLab Shell (Git via SSH), and Workhorse (Git via HTTP(S)). 
These services are implemented using Go, a language that conveniently provides abstractions to deal with I/O operations efficiently.\n\nGolang's [`io`](https://pkg.go.dev/io) package provides the [`Reader`](https://pkg.go.dev/io#Reader) and [`Writer`](https://pkg.go.dev/io#Writer) interfaces to abstract the functionality of I/O implementations into public interfaces.\n\n`Reader` is the interface that wraps the basic `Read` method:\n\n```go\ntype Reader interface {\n\tRead(p []byte) (n int, err error)\n}\n```\n\n`Writer` is the interface that wraps the basic `Write` method:\n\n```go\ntype Writer interface {\n\tWrite(p []byte) (n int, err error)\n}\n```\n\nFor example, the [`os`](https://pkg.go.dev/os) package provides an implementation for reading a file. The `File` type implements the `Reader` and `Writer` interfaces by defining the basic [`Read`](https://pkg.go.dev/os#File.Read) and [`Write`](https://pkg.go.dev/os#File.Write) functions.\n\nIn this blog post, you'll learn how to compose Readers and Writers in Golang applications.\n\nFirst, let's read from a file and write its content to [`os.Stdout`](https://cs.opensource.google/go/go/+/master:src/os/file.go;l=66?q=Stdout&ss=go%2Fgo):\n\n```go\nfunc main() {\n\tfile, err := os.Open(\"data.txt\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\tp := make([]byte, 32 * 1024)\n\tfor {\n\t\tn, err := file.Read(p)\n\n\t\t_, errW := os.Stdout.Write(p[:n])\n\t\tif errW != nil {\n\t\t\tlog.Fatal(errW)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n```\n\nEach call of the `Read` function fills the buffer `p` with content from the file, i.e., the file is consumed in chunks (up to `32KB`) instead of being fully loaded into memory.\n\nTo simplify this widely used pattern, the `io` package conveniently provides the [`Copy`](https://pkg.go.dev/io#Copy) function, which allows passing content from any `Reader` to any `Writer` and also [handles](https://cs.opensource.google/go/go/+/refs/tags/go1.21.0:src/io/io.go;l=433) additional edge cases:\n\n```go\nfunc main() {\n\tfile, err := os.Open(\"data.txt\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\tif _, err := io.Copy(os.Stdout, file); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n```\n\n`Reader` and `Writer` interfaces are used across the whole Golang ecosystem because they facilitate reading and writing content in a streaming fashion. Therefore, gluing together Readers and Writers with the functions that expect these interfaces as arguments is a frequent problem to solve. Sometimes it's as straightforward as passing content from a Reader into a Writer, but sometimes the content written into a Writer must be represented as a Reader, or the content from a Reader must be sent into multiple Writers. 
Let's have a closer look at different use cases and examples of solving these types of problems in the GitLab codebase.\n\n## Reader -> Writer\n\n**Problem**\n\nWe need to pass content from a Reader into a Writer.\n\n![readers and writers - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099494917.png)\n\n**Solution**\n\nThe problem can be solved by using [`io.Copy`](https://pkg.go.dev/io#Copy).\n\n```go\nfunc Copy(dst Writer, src Reader) (written int64, err error)\n```\n\n**Example**\n\n[`InfoRefs*`](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/gitaly/smarthttp.go#L18-35) Gitaly RPCs return a `Reader`, and we want to [stream](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/git/info-refs.go#L78-80) its content to a user via the HTTP response:\n\n```go\nfunc handleGetInfoRefsWithGitaly(ctx context.Context, responseWriter *HttpResponseWriter, a *api.Response, rpc, gitProtocol, encoding string) error {\n        ...\n        infoRefsResponseReader, err := smarthttp.InfoRefsResponseReader(ctx, &a.Repository, rpc, gitConfigOptions(a), gitProtocol)\n        ...\n        if _, err = io.Copy(w, infoRefsResponseReader); err != nil {\n            return err\n        }\n        ...\n}\n```\n\n## Reader -> Multiple Writers\n\n**Problem**\n\nWe need to pass content from a Reader into multiple Writers.\n\n![readers and writers - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099494917.png)\n\n**Solution**\n\nThe `io` package provides the [`io.MultiWriter`](https://pkg.go.dev/io#MultiWriter) function that _converts_ multiple Writers into a single one. When its `Write` function is called, the content is copied to all the Writers ([implementation](https://cs.opensource.google/go/go/+/refs/tags/go1.21.0:src/io/multi.go;l=127)).\n\n```go\nfunc MultiWriter(writers ...Writer) Writer\n```\n\n**Example**\n\nSuppose we want to [build](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/multi_hash.go#L13-18) `md5`, `sha1`, `sha256`, and `sha512` hashes from the same content. The [`Hash`](https://pkg.go.dev/hash#Hash) type is a `Writer`. Using `io.MultiWriter`, we define a [`multiHash`](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/multi_hash.go#L43-61) Writer. 
After the content is [written](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/destination.go#L124-125) to the `multiHash`, we [calculate](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/multi_hash.go#L63-70) the hashes for all of these hash functions in a single pass.\n\nThe simplified version of the example is:\n\n```go\npackage main\n\nimport (\n\t\"crypto/sha1\"\n\t\"crypto/sha256\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n)\n\nfunc main() {\n\ts1 := sha1.New()\n\ts256 := sha256.New()\n\n\tw := io.MultiWriter(s1, s256)\n\tif _, err := w.Write([]byte(\"content\")); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(s1.Sum(nil))\n\tfmt.Println(s256.Sum(nil))\n}\n```\n\nFor simplicity, we just call the `Write` function on the Writer, but when content comes from a Reader, `io.Copy` can be used as well:\n\n```go\n_, err := io.Copy(io.MultiWriter(s1, s256), reader)\n```\n\n## Multiple Readers -> Reader\n\n**Problem**\n\nWe have multiple Readers and need to read from them sequentially.\n\n![readers and writers - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099494919.png)\n\n**Solution**\n\nThe `io` package provides the [`io.MultiReader`](https://pkg.go.dev/io#MultiReader) function that _converts_ multiple Readers into a single one. The Readers are read in the order they are passed.\n\n```go\nfunc MultiReader(readers ...Reader) Reader\n```\n\nThis Reader can then be used in any function that accepts a `Reader` as an argument.\n\n**Example**\n\nWorkhorse [reads](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/cmd/gitlab-resize-image/png/reader.go#L26-38) the first `N` bytes of an image to detect whether it's a PNG file and _puts them back_ by building a Reader from multiple Readers:\n\n```go\nfunc NewReader(r io.Reader) (io.Reader, error) {\n\tmagicBytes, err := readMagic(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif string(magicBytes) != pngMagic {\n\t\tdebug(\"Not a PNG - read file unchanged\")\n\t\treturn io.MultiReader(bytes.NewReader(magicBytes), r), nil\n\t}\n\n\treturn io.MultiReader(bytes.NewReader(magicBytes), &Reader{underlying: r}), nil\n}\n```\n\n## Multiple Readers -> Multiple Writers\n\n**Problem**\n\nWe need to pass content from multiple Readers into multiple Writers.\n\n![readers and writers - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099494921.png)\n\n**Solution**\n\nThe solutions above can be generalized to the many-to-many use case.\n\n```go\n_, err := io.Copy(io.MultiWriter(w1, w2, w3), io.MultiReader(r1, r2, r3))\n```\n\n## Reader -> Reader + Writer\n\n**Problem**\n\nWe need to read content from a Reader or pass the Reader to a function and simultaneously write the content into a Writer.\n\n![readers and writers - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099494923.png)\n\n**Solution**\n\nThe `io` package provides the [`io.TeeReader`](https://pkg.go.dev/io#TeeReader) function that accepts a Reader to read from and a Writer to write to, and returns a Reader that can be processed further.\n\n```go\nfunc TeeReader(r Reader, w Writer) Reader\n```\n\nThe 
[implementation](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/io/io.go;l=610) of the functionality is straightforward. The passed `Reader` and `Writer` are stored in a structure that is a `Reader` itself:\n\n```go\nfunc TeeReader(r Reader, w Writer) Reader {\n\treturn &teeReader{r, w}\n}\n\ntype teeReader struct {\n\tr Reader\n\tw Writer\n}\n```\n\nThe `Read` function implemented for the structure delegates the `Read` to the passed `Reader` and also performs a `Write` to the passed `Writer`:\n\n```go\nfunc (t *teeReader) Read(p []byte) (n int, err error) {\n\tn, err = t.r.Read(p)\n\tif n > 0 {\n\t\tif n, err := t.w.Write(p[:n]); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\treturn\n}\n```\n\n**Example 1**\n\nWe already touched on hashing in the `Reader -> Multiple Writers` section, and `io.TeeReader` is [used](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/upload/destination/destination.go#L124-125) to provide a Writer to create a hash from content. The returned Reader can be further used to upload content to object storage.\n\n**Example 2**\n\nWorkhorse uses `io.TeeReader` to [implement](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/dependencyproxy/dependencyproxy.go#L57-101) the Dependency Proxy [functionality](https://docs.gitlab.com/ee/user/packages/dependency_proxy/). The Dependency Proxy caches requested upstream images in object storage. The not-yet-cached use case has the following behavior:\n\n- A user performs an HTTP request.\n- The upstream image is fetched using [`net/http`](https://pkg.go.dev/net/http) and [`http.Response`](https://pkg.go.dev/net/http#Response) provides its content via the `Body` field, which is an [`io.ReadCloser`](https://pkg.go.dev/io#ReadCloser) (basically an `io.Reader`).\n- We need to send this content back to the user by writing it into [`http.ResponseWriter`](https://pkg.go.dev/net/http#ResponseWriter) (basically an `io.Writer`).\n- We need to simultaneously upload the content to object storage by performing an HTTP request built with [`http.NewRequest`](https://pkg.go.dev/net/http#NewRequest) (a function that accepts an `io.Reader`).\n\nAs a result, `io.TeeReader` can be used to glue these primitives together:\n\n```go\nfunc (p *Injector) Inject(w http.ResponseWriter, r *http.Request, sendData string) {\n\t// Fetch upstream data via HTTP\n\tdependencyResponse, err := p.fetchUrl(r.Context(), sendData)\n\t...\n\t// Create a tee reader. Each Read will read from dependencyResponse.Body and simultaneously\n\t// perform a Write to the w writer\n\tteeReader := io.TeeReader(dependencyResponse.Body, w)\n\t// Pass the tee reader as the body of an HTTP request to upload it to object storage\n\tsaveFileRequest, err := http.NewRequestWithContext(r.Context(), \"POST\", r.URL.String()+\"/upload\", teeReader)\n\t...\n\tnrw := &nullResponseWriter{header: make(http.Header)}\n\tp.uploadHandler.ServeHTTP(nrw, saveFileRequest)\n\t...\n```\n\n## Writer -> Reader\n\n**Problem**\n\nWe have a function that accepts a Writer, and we are interested in the content that the function would write into the Writer. 
We want to intercept the content and represent it as a Reader to further process it in a streaming fashion.\n\n![readers and writers - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099494924.png)\n\n**Solution**\n\nThe `io` package provides the [`io.Pipe`](https://pkg.go.dev/io#Pipe) function that returns a Reader and a Writer:\n\n```go\nfunc Pipe() (*PipeReader, *PipeWriter)\n```\n\nThe Writer can be passed to the function that accepts a Writer. All the content that has been written into it will be accessible via the Reader, i.e. a synchronous in-memory pipe is created that can be used to connect code expecting an `io.Reader` with code expecting an `io.Writer`.\n\n**Example 1**\n\nFor [LSIF](https://lsif.dev/) file [transformation](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/parser.go#L68-72) for code navigation, we need to:\n\n- [Read](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/parser.go#L48-51) the content of a zip file.\n- Transform the content and [serialize](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/docs.go#L97-112) it into a [`zip.Writer`](https://pkg.go.dev/archive/zip#Writer).\n- [Represent](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/parser.go#L68-72) the new compressed content as a Reader to be further processed in a streaming fashion.\n\nThe [`zip.NewWriter`](https://pkg.go.dev/archive/zip#NewWriter) function accepts a Writer to which it will write the compressed content. This is handy when we need to pass an open file descriptor to the function to save the content to a file. 
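\n\nAs a brief illustration of that file-backed case, here is a minimal sketch (the file name and archive entry are hypothetical, chosen only for this example):\n\n```go\n// Open a file descriptor; zip.NewWriter will write the compressed content into it.\nf, err := os.Create(\"transformed.zip\")\nif err != nil {\n\tlog.Fatal(err)\n}\ndefer f.Close()\n\nzw := zip.NewWriter(f)\n// Create an entry in the archive and write some content into it.\nw, err := zw.Create(\"docs.json\")\nif err != nil {\n\tlog.Fatal(err)\n}\nif _, err := w.Write([]byte(\"content\")); err != nil {\n\tlog.Fatal(err)\n}\n// Closing flushes the remaining compressed data to the underlying Writer.\nif err := zw.Close(); err != nil {\n\tlog.Fatal(err)\n}\n```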
However, when we need to pass the compressed content via an HTTP request, we need to represent the data as a Reader.\n\n```go\n// io.Pipe() creates a reader and a writer.\npr, pw := io.Pipe()\n\n// The writer is passed to the `parser.transform` function, which will write\n// the transformed, compressed content into it.\n// The writing should happen asynchronously in a goroutine because each `Write` to\n// the `PipeWriter` blocks until it has satisfied one or more `Read`s from the `PipeReader`.\ngo parser.transform(pw)\n\n// Everything that has been written into the writer is now accessible via the reader.\nparser := &Parser{\n\tDocs: docs,\n\tpr:   pr,\n}\n\n// pr is a reader that can be used to read all the data written to the pw writer\nreturn parser, nil\n```\n\n**Example 2**\n\nFor Geo setups, [GitLab Shell](https://gitlab.com/gitlab-org/gitlab-shell) proxies all `git push` operations performed against a secondary site and redirects them to the primary site.\n\n- GitLab Shell establishes an SSH connection and defines a [`ReadWriter`](https://gitlab.com/gitlab-org/gitlab-shell/blob/7898d8e69daf51a7b6e01052c4516ca70893a2d4/internal/command/readwriter/readwriter.go#L6-7) struct that has an `In` field of type `io.Reader` to read data from a user and an `Out` field of type `io.Writer` to send the response to the user.\n- GitLab Shell performs an HTTP request to `/info/refs` and sends `response.Body` of type `io.Reader` to the user using [`io.Copy`](https://gitlab.com/gitlab-org/gitlab-shell/blob/7898d8e69daf51a7b6e01052c4516ca70893a2d4/internal/command/githttp/push.go#L60).\n- The user reacts to this response by sending data to `In`, and GitLab Shell needs to read this data, convert it to a request expected by Git HTTP, and send it as an HTTP request to `/git-receive-pack`. This is where `io.Pipe` becomes useful.\n\n```go\nfunc (c *PushCommand) requestReceivePack(ctx context.Context, client *git.Client) error {\n\t// Define pipeReader and pipeWriter and use pipeWriter to collect all the data\n\t// sent by the user converted to a format expected by Git HTTP.\n\tpipeReader, pipeWriter := io.Pipe()\n\t// The writing happens asynchronously because it's a blocking operation\n\tgo c.readFromStdin(pipeWriter)\n\n\t// pipeReader can be passed as io.Reader and used to read all the data written to pipeWriter\n\tresponse, err := client.ReceivePack(ctx, pipeReader)\n\t...\n\t_, err = io.Copy(c.ReadWriter.Out, response.Body)\n\t...\n}\n\nfunc (c *PushCommand) readFromStdin(pw *io.PipeWriter) {\n\tvar needsPackData bool\n\n\t// Scanner reads the user input line by line\n\tscanner := pktline.NewScanner(c.ReadWriter.In)\n\tfor scanner.Scan() {\n\t\tline := scanner.Bytes()\n\t\t// And writes it to the pipe writer\n\t\tpw.Write(line)\n\t\t...\n\t}\n\n\t// The data that hasn't been processed by a scanner is copied if necessary\n\tif needsPackData {\n\t\tio.Copy(pw, c.ReadWriter.In)\n\t}\n\n\t// Close the pipe writer to signify EOF for the pipe reader\n\tpw.Close()\n}\n```\n\n## Try Golang\n\nGolang provides elegant patterns designed to efficiently process data in a streaming fashion. 
The patterns can be used to address new challenges or to rework existing code that suffers from performance issues associated with high memory consumption.\n\n> Learn more about [GitLab and Golang](https://docs.gitlab.com/ee/development/go_guide/).\n",[726,702,915,704],{"slug":1186,"featured":6,"template":678},"compose-readers-and-writers-in-golang-applications","content:en-us:blog:compose-readers-and-writers-in-golang-applications.yml","Compose Readers And Writers In Golang Applications","en-us/blog/compose-readers-and-writers-in-golang-applications.yml","en-us/blog/compose-readers-and-writers-in-golang-applications",{"_path":1192,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1193,"content":1198,"config":1205,"_id":1207,"_type":16,"title":1208,"_source":17,"_file":1209,"_stem":1210,"_extension":20},"/en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform",{"title":1194,"description":1195,"ogTitle":1194,"ogDescription":1195,"noIndex":6,"ogImage":1116,"ogUrl":1196,"ogSiteName":692,"ogType":693,"canonicalUrls":1196,"schema":1197},"Quick setup of a GKE Cluster with ArgoCD pre-installed using Terraform","Use this tutorial as a great starting point to manage your cluster entirely through GitOps.","https://about.gitlab.com/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quick setup of a GKE Cluster with ArgoCD pre-installed using Terraform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2024-01-31\",\n      }",{"title":1194,"description":1195,"authors":1199,"heroImage":1116,"date":1201,"body":1202,"category":14,"tags":1203},[789,1200],"Siddharth Mathur","2024-01-31","This tutorial will walk you through setting up a Google Kubernetes Engine (GKE) Cluster with ArgoCD pre-installed, utilizing Terraform, in less than 10 minutes. This will be a great starting point to manage your cluster entirely through GitOps.\n\n### Prerequisites\n- GCP account with permissions to provision a GKE Cluster\n- Kubectl client v1.23.9\n- Kubectl server v1.23.16-gke.1400\n- Working knowledge of GKE\n- Basic knowledge of ArgoCD\n\n#### An overview of this tutorial is as follows:\n- Set up the GitLab Terraform GKE ArgoCD Template\n- Connect to your GKE Cluster\n- Grab the ArgoCD Initial Admin Secret\n- Log into ArgoCD\n- Enjoy your Kubernetes Cluster with ArgoCD!\n\n#### Set up the GitLab Terraform GKE ArgoCD template\n\nStart by importing the example project by URL - [https://gitlab.com/projects/new#import_project](https://gitlab.com/projects/new#import_project).\n\nTo import the project:\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Import project**.\n4. Select **Repository by URL**.\n5. For the Git repository URL:\n    - [GitLab Terraform GKE ArgoCD](https://gitlab.com/demos/infrastructure/gitlab-terraform-gke-argocd)\n6. Complete the fields and select **Create project**.\n\n#### Add in your cloud credentials to CI/CD variables\n\n1. To authenticate GCP with GitLab, create a GCP service account with the following roles: **Compute Network Viewer, Kubernetes Engine Admin, Service Account User, and Service Account Admin**. Both the User and Admin roles are necessary. 
The User role impersonates the default service account when creating the node pool. The Admin role creates a service account in the kube-system namespace.\n2. **Download the JSON file** with the service account key you created in the previous step.\n3. On your computer, encode the JSON file to base64 (replace /path/to/sa-key.json with the path to your key):\n\n```\nbase64 -i /path/to/sa-key.json\n```\n\n4. Use the output of this command as the **BASE64_GOOGLE_CREDENTIALS** environment variable in the next step.\n5. On the left sidebar, select **Settings > CI/CD** and expand **Variables**. Set the variable **BASE64_GOOGLE_CREDENTIALS** to the base64-encoded JSON file you just created.\n6. Set the variable **TF_VAR_gcp_project** to the name of your GCP project.\n\n![simpleargocd - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_1.png)\n\n#### Run GitLab CI to deploy your Kubernetes cluster with ArgoCD installed.\n\n![simpleargocd - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_2.png)\n\n#### Connect to your GKE Cluster through your terminal using the following bash command.\n\n```bash\ngcloud container clusters get-credentials gitlab-terraform-gke-argocd --region us-central1 --project \u003Cproject-name>\n```\n\n![simpleargocd-image3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd-image3.png)\n\n#### Expose the Initial Admin Secret through your terminal using the following bash command. Make sure you save this password for later.\n\n```bash\nkubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath=\"{.data.password}\" | base64 -d\n```\n\n#### Port-forward ArgoCD to localhost:8080 through your terminal using the following bash command. Then go to localhost:8080 in your browser.\n\n```bash\nkubectl port-forward svc/argocd-server -n argocd 8080:443\n```\n\n#### Enter the `admin` username and the `Initial Admin Secret` password on the login page.\n\n![simpleargocd - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_4.png)\n\n#### Voila! You've bootstrapped your GKE cluster with ArgoCD. Enjoy your GitOps!\n\n![simpleargocd - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_5.png)\n\n### Next steps\nWe recommend reviewing [setting up Review Ops with ArgoCD](https://about.gitlab.com/blog/how-to-provision-reviewops/)! 
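\n\nBefore diving deeper, you can also run a quick health check to confirm that the installation succeeded (a minimal sketch, assuming the `argocd` namespace used throughout this tutorial):\n\n```bash\n# All ArgoCD components should report a Running status\nkubectl get pods -n argocd\n```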
\n\n### References\n- [GitLab Learn Labs - Infrastructure Webinar](https://gitlab.com/gitlab-learn-labs/webinars/infrastructure/gitlab-terraform-gke-argocd)\n- [Getting started with ArgoCD](https://argo-cd.readthedocs.io/en/release-2.0/getting_started/)\n\n### Related posts\n- [Simple Kubernetes management with GitLab](https://about.gitlab.com/blog/simple-kubernetes-management-with-gitlab/)\n- [How to provision ReviewOps](https://about.gitlab.com/blog/how-to-provision-reviewops/)\n- [The ultimate guide to GitOps with GitLab](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n",[535,1204,110,725,750],"GKE",{"slug":1206,"featured":6,"template":678},"quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform","content:en-us:blog:quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform.yml","Quick Setup Of A Gke Cluster With Argocd Pre Installed Using Terraform","en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform.yml","en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform",{"_path":1212,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1213,"content":1219,"config":1225,"_id":1227,"_type":16,"title":1228,"_source":17,"_file":1229,"_stem":1230,"_extension":20},"/en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands",{"title":1214,"description":1215,"ogTitle":1214,"ogDescription":1215,"noIndex":6,"ogImage":1216,"ogUrl":1217,"ogSiteName":692,"ogType":693,"canonicalUrls":1217,"schema":1218},"Inside the improved CI logs management experience for multi-line commands","Reviewing log output for CI/CD jobs with multi-line commands is now easier than ever. Find out why, how to configure your pipelines, and what's ahead.\n\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099499/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_639935439_3oqldo5Yt5wPonEJYZOLTM_1750099498739.jpg","https://about.gitlab.com/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the improved CI logs management experience for multi-line commands\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Romuald Atchadé\"}],\n        \"datePublished\": \"2024-01-25\",\n      }",{"title":1214,"description":1215,"authors":1220,"heroImage":1216,"date":1222,"body":1223,"category":14,"tags":1224},[1221],"Romuald Atchadé","2024-01-25","Improving the GitLab CI/CD log experience for jobs with multi-line commands has been a long-requested feature. With the latest release of GitLab and GitLab Runner, it's now easier to work with the log section for jobs with multi-line commands. In this post, we will describe the experience with the new feature, show you how to enable the new log output in your pipelines, and discuss key points regarding CI/CD script execution and log output in various shells, such as Bash and PowerShell.\n\n## Overview of multi-line commands\n\nFirst, it’s helpful to describe what we mean by a CI job with multi-line commands. In GitLab CI, the `script` keyword is used to specify commands to execute for a CI job. In the example below, the build-job has a single command, a basic echo statement, to execute in the script block. 
\n\n```\n## A pipeline with a single line command in the script block for the build-job\n\nbuild-job:\n  stage: build\n  script:\n    - echo \"this is the script to run for the build job\"\n\n```\n\nIf you were to run this pipeline, then the log output in the UI would display as follows:\nLine 17 - GitLab CI automatically generates a log entry for the command that you specify in the script block.\nLine 18 - This is the output of the command that was executed.\n\n![Ci log management - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099524/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099524655.png)\n\nNow as you can imagine, the script that you define in the script block will likely be more complex than the example provided and could very well span multiple lines in the CI/CD pipeline file.\n\n```\n## A pipeline with a multi-line command in the script block for the build-job\n\nbuild-job:\n  stage: build\n  script:\n    - |\n      echo \"this is a multi-line command\"  # a simple echo statement\n      ls\n\n```\n\nIf you were to run this pipeline, then the log output in the UI would display as follows:\n\nLine 17 - As in the previous example, GitLab CI automatically generates a log entry for the command that you specify in the script block. You will notice that line 17 only includes the first command in the script block. This makes it more difficult to debug an issue with script execution, as you will need to refer back to the source pipeline file to see exactly what script was executed.\n\n![CI log management - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099524656.png)\n\n## So what’s new?\n\nStarting in GitLab 16.7 and GitLab Runner 16.7, you can now enable a feature flag named `FF_SCRIPT_SECTIONS`, which will add a collapsible output section to the CI job log for multi-line command script blocks. This feature flag changes the log output for CI jobs that execute within the Bash shell.\n\n![CI log management - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099524658.png)\n\nLine 17 - Unlike the previous examples, the first thing you will notice in the screenshot above is that the log entry for the multi-line command is collapsed by default.\n\nSingle-line commands do not display in a collapsible element.\n\nFor multi-line scripts, the multi-line command is now a collapsible element: when you expand the log entry for line 17, the log displays all of the commands that were executed in the script block.\n\n![CI log management - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099524659.png)\n\nThere is also the [`custom collapsible section`](https://docs.gitlab.com/ee/ci/jobs/#custom-collapsible-sections) feature, which in combination with this new multi-command output capability provides you additional flexibility for displaying log output in the UI. Here is how you can use the two features to change the log output. 
\n\n```\n## A pipeline with a multi-line command in the script block for the build-job\n\nvariables:\n  FF_PRINT_POD_EVENTS: \"true\"\n  FF_USE_POWERSHELL_PATH_RESOLVER: \"true\"\n  FF_SCRIPT_SECTIONS: \"true\"\n\ncollapsible_job_multiple:\n  stage: build\n  script:\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - echo -e \"\\033[0Ksection_start:`date +%s`:my_first_section\\r\\033[0KHeader of the 1st collapsible section\"\n    - echo 'this line should be hidden when collapsed'\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - echo -e \"\\033[0Ksection_start:`date +%s`:second_section\\r\\033[0KHeader of the 2nd collapsible section\"\n    - echo 'this line should be hidden when collapsed'\n    - echo -e \"\\033[0Ksection_end:`date +%s`:second_section\\r\\033[0K\"\n    - echo -e \"\\033[0Ksection_end:`date +%s`:my_first_section\\r\\033[0K\"\n\n```\n\nIf you were to run this pipeline with the `FF_SCRIPT_SECTIONS` feature flag set to `false`, then the log output would be as depicted in the following screenshot.\n\n![CI log management - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099524/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099524661.png)\n\nBut, if you were to run this pipeline with the `FF_SCRIPT_SECTIONS` feature flag set to `true`, then the log output would be as depicted in the following screenshot.\n\n![CI log management - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099524663.png)\n\n## What about other shells?\n\nAs of the 16.7 release, the collapsible output section in the CI job log for multi-line command script blocks is only visible for CI/CD jobs that are executed with the Bash shell. CI/CD jobs executed with PowerShell are not currently supported. We plan to add this [capability](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4494) in a future release.\n\n## What are our future plans?\n\nA few features are still needed to improve the CI/CD job log output, and a `timestamp` for each log line is one of them. This addition will enable missing capabilities such as command/section duration.\n\n> To learn more about GitLab CI/CD features, refer to the official [CI/CD documentation](https://docs.gitlab.com/ee/ci/index.html).\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[832,937,110,726],{"slug":1226,"featured":92,"template":678},"inside-the-improved-ci-logs-management-experience-for-multi-line-commands","content:en-us:blog:inside-the-improved-ci-logs-management-experience-for-multi-line-commands.yml","Inside The Improved Ci Logs Management Experience For Multi Line Commands","en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands.yml","en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands",{"_path":1232,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1233,"content":1239,"config":1244,"_id":1246,"_type":16,"title":1247,"_source":17,"_file":1248,"_stem":1249,"_extension":20},"/en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta",{"title":1234,"description":1235,"ogTitle":1234,"ogDescription":1235,"noIndex":6,"ogImage":1236,"ogUrl":1237,"ogSiteName":692,"ogType":693,"canonicalUrls":1237,"schema":1238},"Introducing the GitLab CI/CD Catalog Beta","Discover, reuse, and contribute CI/CD components effortlessly, enhancing collaboration and efficiency when creating pipeline configurations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099399/Blog/Hero%20Images/Blog/Hero%20Images/security-pipelines_4UHVIJlePT8rEzjvYkGYvi_1750099398604.jpg","https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the GitLab CI/CD Catalog Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-12-21\",\n      }",{"title":1234,"description":1235,"authors":1240,"heroImage":1236,"date":1241,"body":1242,"category":14,"tags":1243},[1019],"2023-12-21","DevSecOps is all about speed – achieving rapid progress in software development. To succeed in DevSecOps, organizations require a well-functioning CI/CD pipeline that teams can utilize to automate their development workflows.\n\nHowever, crafting pipeline configurations with YAML can be intricate and challenging because YAML isn't a programming language. Developers may find themselves reinventing the wheel each time they try to create new configurations because they don't have visibility into existing configurations and work that others may have already done, resulting in inefficiency.\n\n[GitLab 16.7](https://about.gitlab.com/releases/2023/12/21/gitlab-16-7-released/) introduces the [CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/#cicd-catalog) (Beta), with the goal of enhancing developer efficiency by addressing three main questions developers encounter when creating pipeline configurations:\n\n* Discoverability: Has someone already created a configuration for my task, and where can I find it?\n* Reusability: Once I find a suitable pipeline, how do I use it effectively?\n* Ease of contribution: I've created a useful configuration; how can I easily share it with the GitLab community?\n\n## What is the GitLab CI/CD Catalog?\n\nThe CI/CD Catalog serves as a centralized hub for developers and organizations to share pre-existing [CI/CD components](https://docs.gitlab.com/ee/ci/components/) and to discover reusable configurations that others may have already developed. Every component published by users will be part of a public catalog accessible to all users, regardless of their organization or project. 
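\n\nTo give a sense of what reuse looks like in practice, here is a minimal sketch of how a published component is pulled into a pipeline with the `include` keyword (the component path, version, and inputs below are hypothetical):\n\n```yaml\ninclude:\n  # Reference a published component by its full path and a released version\n  - component: gitlab.com/my-org/my-components/deploy@1.0.0\n    inputs:\n      stage: deploy\n```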
\n\nThis approach promotes cross-organization collaboration, allowing the entire GitLab community to benefit from the wealth of CI components available. It's a powerful step forward in sharing knowledge among GitLab users, enabling developers to harness the collective expertise of the platform.\n\n## Easy component creation and publishing\n\nIn addition to reusing components, developers can contribute to the GitLab CI/CD community by creating their own components and publishing them in the catalog. This ensures that others can benefit from their expertise and encourages collaboration across the platform.\n\n## How to discover and use components\n\n**1. Opening the CI/CD Component Catalog**\n\nClick on “Search or go to...”\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099406962.png)\n\nOpen the catalog by navigating to “Explore > CI/CD Catalog” or visit this [catalog page](https://gitlab.com/explore/catalog).\n\nUpon accessing the catalog, you'll find a list of CI/CD components projects contributed by your team, organization, or the wider GitLab community.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099406963.png)\n\n**2. Browsing components**\n\nNavigate through the list of components in the CI/CD Catalog or use the Search bar to find components related to a specific topic.\n\nEach component project contains one or multiple components. Opening a component project will display its documentation, providing details on all available components. This includes insights into how to use each component and understanding the expected input parameters.\n\n**3. Include the selected components in your .gitlab-ci.yml**\n\nNow that you've explored the catalog and selected the desired CI/CD components, integrate them into your project's CI/CD pipeline.\n\nFollow these steps to update your .gitlab-ci.yml file:\n\n1. Open the .gitlab-ci.yml file in your project for editing.\n2. Use the include keyword to add the selected components to your CI configuration. \n3. Ensure that the paths to the component YAML files are correct and specify the appropriate version for each component.\n4. In case the components have input parameters, review the component’s documentation to understand which inputs are required, and add them to your CI configuration.\n5. Save and commit your changes to the .gitlab-ci.yml file.\n\nHere is an example of YAML code that demonstrates how to include a few components and use them with input parameters.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_1.15.48_PM_aHR0cHM6_1750099406965.png)\n\n## How to create and publish components\n\nHave you crafted a valuable configuration that you'd like to share and contribute to your team or the GitLab community? Here are the six steps to make it happen:\n\n**Step 1: Create a new project and set it as a component project**\n\n1. On the left sidebar, select **Search or go to** and find your project.\n2. On the left sidebar, select Settings > General.\n3. Expand Visibility, project features, permissions.\n4. Scroll down to CI/CD Catalog resource and select the toggle to set the project as a CI/CD Catalog resource.\n5. 
Ensure that your project description is filled out; this information will be showcased in the catalog, providing users with insights into the purpose and functionality of your components.\n6. Create a .gitlab-ci.yml file in the root of the repository. You will need this file to test and release the components as described in steps 4 and 5 below. Note: This step only needs to be done once for any project that contains components.\n\n**Step 2: Create the components**\n\n1. Create a /templates folder in the root directory of the project.\n2. In this templates directory, create one YAML template file (ending in .yml) for each component.\n3. If the component requires input parameters, the template can optionally include a description of the input arguments using the `spec` keyword, followed by the definition of jobs, which may include references to values using the interpolation format `$[[ inputs.input-name ]]`. Ensure you use three dashes (`---`) between the spec header and the job definitions.\n\nHere is an example of a `deploy.yml` template that gets input parameters:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_11.34.20_AM_aHR0cHM6_1750099406966.png)\n\nIn this template, we've defined two input parameters, `stage` and `environment`, both with default values. In the content section, a job is defined that interpolates these input arguments.\n\n**Step 3: Create components documentation**\n\nCreate a README.md file in the root of the project, including information about the components. Explain the component's functionality, detail input parameters, and provide illustrative examples. This ensures clarity for component consumers on how to use them.\n\nThis is an example of component documentation:\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099406967.png)\n\nAdditional information can be found in our [CI/CD components](https://docs.gitlab.com/ee/ci/components/index.html#components-repository) documentation.\n\n**Step 4: Add tests to the components (recommended)**\n\nDeveloping a component follows a standard software development cycle with stages like build, test, and deploy. It's highly recommended to test your components before publishing them. Check out this example test, which queries the GitLab REST API to check whether a component job has been added to the pipeline. Feel free to use it, and consider adding more tests to ensure your components work as expected.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.32.53_PM_aHR0cHM6_1750099406968.png)\n\nInclude all your test jobs in the **.gitlab-ci.yml** file in your Catalog project.\n\n**Step 5: Prepare your CI/CD configuration for publishing**\n\n1. Create a release job in the **.gitlab-ci.yml** file in the component project using the `release` keyword. See the job example:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.34.27_PM_aHR0cHM6_1750099406969.png)\n\n__Note:__ Do not \"create release\" from the GitLab UI, since this soon won't be supported for the Component Catalog.\n\n2. 
We recommend adding this rule to the release job; it will automatically trigger the release job only when a Git tag that starts with digits is created in the project, following semantic versioning conventions (for example, 1.0.0).\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_1.21.30_PM_aHR0cHM6_1750099406970.png)\n\n3. This is how we recommend your job look:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.37.09_PM_aHR0cHM6_1750099406970.png)\n\n4. To release components manually, add a manual rule as shown below; when the pipeline is triggered, someone will then need to run the release job manually.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.38.18_PM_aHR0cHM6_1750099406971.png)\n\nHere is the release job with the `when:manual` rule:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.41.00_PM_aHR0cHM6_1750099406972.png)\n\n**Step 6: Publish your components**\n\nOnce you are satisfied with your components and all tests have passed successfully, it's time to publish a new version by creating a Git tag, so the components become available in the CI/CD Catalog.\n\n1. Create a Git tag using the semantic versioning format \"MAJOR.MINOR.PATCH\".\n\n2. You can create tags through the UI by navigating to Code -> Tags -> New Tag, or via the CLI using `git tag`.\n\n3. Creating the tag will trigger a pipeline that runs the release job if all tests pass successfully. The component project will then be assigned the version you defined in the tag, and it will appear in the catalog.\n\n### Example projects\n\n* [GitLab official components](https://gitlab.com/components)\n\n### Documentation\n\nFor more details on using components from the CI/CD Catalog and maximizing their potential within your projects, refer to the official [CI/CD Catalog documentation](https://docs.gitlab.com/ee/ci/components/#cicd-catalog). 
This documentation provides in-depth insights into the functionality.\n\n> [Take a tour](https://gitlab.navattic.com/cicd-catalog) of the GitLab CI/CD Catalog.\n\n_A special thank you to [Dov Hershkovitch](https://about.gitlab.com/company/team/#dhershkovitch) and [Fabio Pitino](https://gitlab.com/fabiopitino) for their invaluable content reviews and contributions to this blog post._",[110,676,894,726],{"slug":1245,"featured":6,"template":678},"introducing-the-gitlab-ci-cd-catalog-beta","content:en-us:blog:introducing-the-gitlab-ci-cd-catalog-beta.yml","Introducing The Gitlab Ci Cd Catalog Beta","en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta.yml","en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta",{"_path":1251,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1252,"content":1258,"config":1266,"_id":1268,"_type":16,"title":1269,"_source":17,"_file":1270,"_stem":1271,"_extension":20},"/en-us/blog/how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics",{"title":1253,"description":1254,"ogTitle":1253,"ogDescription":1254,"noIndex":6,"ogImage":1255,"ogUrl":1256,"ogSiteName":692,"ogType":693,"canonicalUrls":1256,"schema":1257},"GitLab Runner Fleet dashboard improved through user research","Learn how GitLab user research drives the product development process when enabling more runner fleet features.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666543/Blog/Hero%20Images/lightvisibility.png","https://about.gitlab.com/blog/how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How user research transformed GitLab Runner Fleet dashboard visibility and metrics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gina Doyle\"}],\n        \"datePublished\": \"2023-11-07\",\n      }",{"title":1259,"description":1254,"authors":1260,"heroImage":1255,"date":1262,"body":1263,"category":14,"tags":1264},"How user research transformed GitLab Runner Fleet dashboard visibility and metrics",[1261],"Gina Doyle","2023-11-07","\nContinuous integration and continuous deployment (CI/CD) are a crucial part of the product development workflow. Companies depend on CI/CD to get new software features, bug fixes, and improvements out the door quickly. At GitLab, runners are at the core of CI/CD and are needed to build, test, and deploy code. [GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner) is the open source project that is used to run CI/CD jobs and send the results back to GitLab. However, since GitLab's early years, GitLab Runner has been code-centric with limited UI capabilities. We recently embarked on a journey to change that – follow along to see how we gathered user input and made desired improvements to the visibility and metrics of the GitLab Runner Fleet dashboard.\n\n## Managing runners\nAs GitLab scaled as a company, so did the number of GitLab users with complex and evolving use cases. In the past five years, we have seen a radical increase in the need for a best-in-class experience when managing a large number of self-managed runners. 
This need has led us to put more time and focus into improving how GitLab manages runners and how it supports users in making decisions quickly and effectively.\n\nTo that end, we’ve been making incremental changes to the runner fleet management experience, including improving the general usability of admin and group runner pages, providing more data around runners such as jobs run and status checks, and improving the runner creation process so it’s more secure and easier to follow. By doing this, we built a better underlying system so we could add new features easily.\n\nHowever, runner admins and platform engineers shared this recurring problem with us:\n- It is difficult to get an at-a-glance view of my fleet of runners, including how they are performing (how fast they pick up jobs, which ones are running the most jobs, etc.) and what issues (if any) are present that need to be fixed.\n\nIn addition to this problem, the GitLab Runner Fleet team was also running into issues with the performance of runner pages and with scalability when trying to add new features. This was a perfect opportunity to learn more about the problem users were facing and to innovate to extend our runner offering.\n\n## Gathering insights and exploring proposals\nTo fully understand the problem at hand and help make the requirements more clear, we carried out [problem validation](https://about.gitlab.com/handbook/product/ux/ux-research/problem-validation-and-methods/) research. We held [moderated in-depth interviews](https://www.usability.gov/how-to-and-tools/methods/individual-interviews.html) and sifted through much of our existing data from previous interviews. As we gained confidence in our understanding of the problem, we created a first iteration of the design to be tested with users through [moderated usability testing](https://about.gitlab.com/handbook/product/ux/ux-research/usability-testing/#different-types-of-usability-testing), which would [determine whether the solution really did solve the problem](https://about.gitlab.com/handbook/product/ux/ux-research/solution-validation-and-methods/).\n\nThis first design proposal focused on:\n- a general overview of the fleet, broken down by types (instance, group, project runners) and status\n- visibility into runner system failures\n- a general concept of runner load - how many jobs are running at once out of how many possible jobs the runner can run?\n- how long it takes for runners to pick up jobs\n- a list of runner events (job failures, status changes, upgrades, etc.)\n\n![Initial design of dashboard 1](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/initial-design-1.png)\n\n![Initial design of dashboard 2](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/initial-design-2.png)\n\n## Testing the usability of the first iteration\nWe ran moderated usability testing sessions so we could measure user responses and satisfaction based on a set of consistent questions across multiple participants. We used a Figma prototype and had participants complete tasks that connected back to the problem we were solving.\n\nAn advantage of running moderated sessions compared to unmoderated sessions is that we could tailor our follow-up questions as required once participants completed a task or provided an answer. 
After completing these sessions, we summarized the data we received into the following key insights to create the MVC (minimal viable change) of the runner fleet dashboard:\n1. Runner failures/errors are crucial to identify problems (voted the most important feature on the dashboard).\n2. Online and offline runners matter the most in terms of status breakdowns for a fleet.\n3. Visibility into busy runners (tied for second most important feature on the dashboard) helps users see individual runner load.\n4. Wait time to pick up a job was tied for the second most important feature on the dashboard and seeing this over time with more configuration options can help identify where to make optimizations in the fleet.\n\nThere are many other features requested by participants that should be handled in follow-up iterations of the dashboard. See [this epic](https://gitlab.com/groups/gitlab-org/-/epics/10631) for more information.\n\n## Updating the designs\nOur next step was to update the designs to consider the research we ran.\n\n### Responding to feedback\n\n1) Wait times\n\n**What we heard:**\n- “Right now, there is very little information available as to how soon a CI build might start. Oftentimes, users are left wondering why jobs won’t run.” \n- “It's mostly reactive for us at this point anyway when, as you know, we get users reporting problems, we might want to go look at wait times here. And be able to dig down on those to see who's waiting...”\n\n**What we did:**\n- Added an in-depth visualization of wait times for all instance runners in the fleet in the past three hours and included percentiles to give users a true representation of the wait times. By providing the data over this interval, we enable runner admins to quickly get a sense of how their runners are performing and if there are any issues with the fleet that would cause jobs to stay in pending state.\n\n![Wait time graph](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/wait-time-graph.png)\n\n2) Runner loads\n\n**What we heard:**\n- “I have three build servers that are shared amongst many projects and in order for me to ensure each build server is properly set up, it's important for me to track builds by server. So, if one particular server is having issues, I need to be able to focus on that server.”\n\n**What we did:**\n- To start indicating some data on runner load, we’ve added a list of the top five busiest runners based on the number of running jobs they have at the moment, ranked from highest to lowest. This should help when analyzing concurrency settings and seeing if runners really need the capacity set for them.\n\n![Active runners](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/active-runners.png)\n\n3) Understanding of most recent failures\n\n**What we heard:**\n- “We actually have a dashboard on Datadog that gives us error counts and errors coming from the runners themselves. But you know, without a dashboard, we have no visibility on anything inside of GitLab, like queue lengths or wait times or anything like that.”\n\n- “Our setup is not perfect...some of the runners run on spot instances and can disappear, which means the background engine can die. You get this very strange error that the job failed because of something and we need to retry the job using a different runner.”\n\n**What we did:**\n- Created a list of most recent failures in the last hour for instance runners. 
Not only can you quickly navigate to the job log and details, but you’re also given a short summary of the error so you get insight into it immediately and can get on your way to fix it.\n\n![Runner failures](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/runner-failures.png)\n\n**The full dashboard:**\n\n![Full runner dashboard](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/full-dashboard.png)\n\n## What's next?\nThis first iteration of the dashboard is not the end. We have many iterations planned to improve the dashboard over the next year. To first get feedback on how it works for users, we will run an [Early Adopters Program](https://gitlab.com/groups/gitlab-org/-/epics/11180) for GitLab Ultimate self-managed users. We will work with teams to set up the feature on their instance and continuously ask for feedback once it is being used. This will also help us understand user satisfaction levels and help our team prioritize fixes and new features as we continue improving the experience.\n\n**Do you want to provide feedback now?** We would love to hear what you think! Please add your thoughts about the Fleet Dashboard to [this feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/421737). To learn more about how we built this dashboard, [watch this technical demo](https://www.youtube.com/watch?v=clyfLsss-vM) by Miguel Rincon, Pedro Pombeiro, and Vladimir Shushlin.\n",[110,1265,959],"research",{"slug":1267,"featured":6,"template":678},"how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics","content:en-us:blog:how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics.yml","How We User Research Transformed Gitlab Runner Fleet Dashboard Visibility And Metrics","en-us/blog/how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics.yml","en-us/blog/how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics",{"_path":1273,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1274,"content":1280,"config":1287,"_id":1289,"_type":16,"title":1290,"_source":17,"_file":1291,"_stem":1292,"_extension":20},"/en-us/blog/rearchitecting-git-object-database-mainentance-for-scale",{"title":1275,"description":1276,"ogTitle":1275,"ogDescription":1276,"noIndex":6,"ogImage":1277,"ogUrl":1278,"ogSiteName":692,"ogType":693,"canonicalUrls":1278,"schema":1279},"Why and how we rearchitected Git object database maintenance for scale","Go in-depth into improvements to maintenance of the Git object database for reduced overhead and increased efficiency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664413/Blog/Hero%20Images/speedlights.png","https://about.gitlab.com/blog/rearchitecting-git-object-database-mainentance-for-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why and how we rearchitected Git object database maintenance for scale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patrick Steinhardt\"}],\n        \"datePublished\": \"2023-11-02\",\n      }",{"title":1275,"description":1276,"authors":1281,"heroImage":1277,"date":1283,"body":1284,"category":14,"tags":1285},[1282],"Patrick Steinhardt","2023-11-02","\n[Gitaly](/direction/gitaly/#gitaly-1), the service that is responsible for providing access to Git repositories in GitLab, needs to 
ensure that the repositories are maintained regularly. Regular maintenance ensures:\n\n- fast access to these repositories for users\n- reduced resource usage for servers\n\nHowever, repository maintenance is quite expensive by itself, and especially so for large monorepos.\n\nIn [a past blog post](/blog/scaling-repository-maintenance/), we discussed how we revamped the foundations of repository maintenance so that we can iterate on the exact maintenance strategy more readily. This blog post will go through improved maintenance strategies for objects hosted in a Git repository, which were enabled by that groundwork.\n\n- [The object database](#the-object-database)\n- [The old way of packing objects](#the-old-way-of-packing-objects)\n- [All-into-one repacks](#all-into-one-repacks)\n- [Deletion of unreachable objects](#deletion-of-unreachable-objects)\n- [Reachability checks](#reachability-checks)\n- [The new way of packing objects](#the-new-way-of-packing-objects)\n- [Cruft packs](#cruft-packs)\n- [More efficient incremental repacks](#more-efficient-incremental-repacks)\n- [Geometric repacking](#geometric-repacking)\n- [Real-world results](#real-world-results)\n\n## The object database\n\nWhenever a user makes changes in a Git repository, these changes come in the form of new objects written into the repository. Typically, any such object is written into the repository as a so-called \"loose object,\" which is a separate file that contains the compressed contents of the object itself with a header that identifies the type of the object.\n\nTo demonstrate this, in the following example we use [`git-hash-object(1)`](https://www.git-scm.com/docs/git-hash-object) to write a new blob into the repository:\n\n```shell\n $ git init --bare repository.git\nInitialized empty Git repository in /tmp/repository.git/\n $ cd repository.git/\n $ echo \"contents\" | git hash-object -w --stdin\n12f00e90b6ef79117ce6e650416b8cf517099b78\n $ tree objects\nobjects\n├── 12\n│   └── f00e90b6ef79117ce6e650416b8cf517099b78\n├── info\n└── pack\n\n4 directories, 1 file\n```\n\nAs you can see, the new object was written into the repository and stored as a separate file in the object database.\n\nOver time, many of these loose objects will accumulate in the repository. Larger repositories tend to have millions of objects, and storing all of them as separate files is going to be inefficient. To ensure that the repository can be served efficiently to our users and to keep the load on servers low, Git will regularly compress loose objects into packfiles. We can compress loose objects manually by using, for example, [`git-pack-objects(1)`](https://www.git-scm.com/docs/git-pack-objects):\n\n```shell\n $ git pack-objects --pack-loose-unreachable ./objects/pack/pack \u003C/dev/null\nEnumerating objects: 1, done.\nCounting objects: 100% (1/1), done.\nWriting objects: 100% (1/1), done.\nTotal 1 (delta 0), reused 0 (delta 0), pack-reused 0\n7ce39d49d7ddbbbbea66ac3d5134e6089210feef\n $ tree objects\nobjects/\n├── 12\n│   └── f00e90b6ef79117ce6e650416b8cf517099b78\n├── info\n│   └── packs\n└── pack\n    ├── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.idx\n    └── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.pack\n```\n\nThe loose object was compressed into a packfile (`.pack`) with a packfile index (`.idx`) that is used to efficiently access objects in that packfile.\n\nHowever, the loose object still exists. 
\n\nTo remove the now-redundant loose object, we can execute [`git-prune-packed(1)`](https://www.git-scm.com/docs/git-prune-packed) to delete all objects that have been packed already:\n\n```shell\n $ git prune-packed\n $ tree objects/\nobjects/\n├── info\n│   └── packs\n└── pack\n    ├── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.idx\n    └── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.pack\n```\n\nFor end users of Git, all of this happens automatically because Git calls `git gc --auto` regularly. This command uses heuristics to figure out what needs to be optimized and whether loose objects need to be compressed into packfiles. However, it is unsuitable for the server side because:\n\n- The command does not scale well enough in its current form. The Git project must be more conservative about changing defaults because it supports a lot of different use cases. Because we know the specific needs we have at GitLab, we can more readily adopt new features that allow for more efficient maintenance.\n- The command does not provide an easy way to observe what exactly it is doing, so we cannot provide meaningful metrics.\n- The command does not allow us to fully control all its exact inner workings and so is not flexible enough.\n\nTherefore, Gitaly uses its own maintenance strategy to maintain Git repositories, of which maintaining the object database is one part.\n\n## The old way of packing objects\n\nAny maintenance strategy to pack objects must ensure the following three things to keep a repository efficient and use disk space effectively:\n\n- Loose objects must be compressed into packfiles.\n- Packfiles must be merged into larger packfiles.\n- Objects that are not reachable anymore must be deleted eventually.\n\nPrior to GitLab 16.0, Gitaly used the following three heuristics to ensure that those three things happened:\n\n- If the number of packfiles in the repository exceeded a certain threshold, Gitaly rewrote all packfiles into a single new packfile. Any objects that were unreachable were put into loose files so that they could be deleted after a certain grace period.\n- If the number of loose objects exceeded a certain threshold, Gitaly compressed all reachable loose objects into a new packfile.\n- If the number of loose objects older than the grace period for object deletion exceeded a certain threshold, Gitaly deleted those objects.\n\nWhile these heuristics satisfy all three requirements, they have several downsides, especially in large monorepos that contain gigabytes of data.\n\n### All-into-one repacks\n\nMost importantly, the first heuristic requires us to do all-into-one repacks where all packfiles are regularly compressed into a single packfile. In Git repositories with high activity levels, we usually create lots of packfiles during normal operations. But because we need to limit the maximum number of packfiles in a repository, we need to regularly do these complete rewrites of all objects.\n\nUnfortunately, doing such an all-into-one repack can be prohibitively expensive in large monorepos. The repacks may allocate large amounts of memory and typically keep multiple CPU cores busy for hours until they complete.\n\nSo, ideally, we want to avoid these all-into-one repacks as much as possible.
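\n\nFor reference, such an all-into-one repack corresponds roughly to the following invocation (a sketch, not the exact set of options Gitaly used):\n\n```shell\n# Rewrite all packfiles into a single new packfile and delete the old,\n# now-redundant packs (-d). With -A, unreachable objects are not dropped\n# but written out as loose objects so their grace period can be tracked.\n $ git repack -A -d\n```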
\n\n### Deletion of unreachable objects\n\nTo avoid certain race conditions, Gitaly and Git enforce a grace period before an unreachable object is eligible for deletion. This grace period is tracked using the access time of such an unreachable object: if the last access time of the object is older than the grace period, the object can be deleted.\n\nTo track the access time of a single object, the object must exist as a loose object. This means that all objects that are pending deletion will be evicted from any packfile they were previously part of and become loose objects.\n\nBecause the grace period we have in place for Gitaly is 14 days, large monorepos tend to accumulate a large number of such loose objects that are pending deletion. This has two effects:\n\n- The number of loose objects overall grows, which makes object lookup less efficient.\n- Loose objects are stored a lot less efficiently than packed objects, which means that the disk space required for the objects that are pending deletion is significantly higher than if those objects were stored in their packed form.\n\nIdeally, we would be able to store unreachable objects in packed format while still being able to store their last access times separately.\n\n### Reachability checks\n\nCompressing loose objects into a new packfile is done by using an incremental repack. Git will compute the reachability of all objects in the repository and then pack all loose objects that are reachable into a new packfile.\n\nTo determine reachability of an object, we have to perform a complete graph walk. Starting at all objects that are directly referenced, we walk down any links that those objects have to any other objects. Once we have walked the complete object graph, all objects are split into two sets: the reachable and the unreachable objects.\n\nThis operation can be quite expensive: the larger the repository and the more objects it contains, the more expensive the computation gets. As mentioned above though, objects that are about to be deleted need to be stored as loose objects so that we can track their last access time. So if our incremental repack compressed all loose objects into a packfile regardless of their reachability, then this would impact our ability to track the grace period per object.\n\nThe ideal solution would avoid doing reachability checks altogether while still being able to track the grace period individually for each unreachable object that is pending deletion.\n\n## The new way of packing objects\n\nOver the past two years, the Git project has shipped multiple mechanisms that allow us to address all of these pain points we had with our old strategy. These new mechanisms come in two different forms:\n\n- Geometric repacking allows us to merge multiple packfiles without having to rewrite all packfiles into one. This feature was introduced in [Git v2.32.0](https://gitlab.com/gitlab-org/git/-/commit/2744383cbda9bbbe4219bd3532757ae6d28460e1).\n- Cruft packs allow us to store objects that are pending deletion in compressed format in a packfile. This feature was introduced in [Git v2.37.0](https://gitlab.com/gitlab-org/git/-/commit/a50036da1a39806a8ae1aba2e2f2fea6f7fb8e08).\n\nThe Gitaly team has reworked the object database maintenance strategy to make use of these new features.\n\n### Cruft packs\n\nPrior to Git v2.37.0, pruning objects with a grace period required Git to first unpack packed objects into loose objects. We did this so that we could track the per-object access times for unreachable objects that are pending deletion, as explained above. This is inefficient, though, as it potentially requires us to keep a lot of unreachable objects in loose format until they can be deleted after the grace period.
\n\nWith Git v2.37.0, [git-repack(1)](https://www.git-scm.com/docs/git-repack) learned to write [cruft packs](https://git-scm.com/docs/cruft-packs). While a cruft pack looks just like a normal pack, it also has an accompanying `.mtimes` file:\n\n```shell\n$ tree objects/\nobjects/\n├── info\n│   └── packs\n└── pack\n    ├── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.idx\n    ├── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.mtimes\n    └── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.pack\n```\n\nThis file contains per-object timestamps that record when each object was last accessed. With this, we can continue to track per-object grace periods while storing the objects in a much more efficient way than as loose objects.
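\n\nTo give a flavor of the mechanism, this is roughly the git-repack(1) invocation that writes such a cruft pack (a sketch; Gitaly drives this through its own housekeeping logic rather than this exact command line):\n\n```shell\n# Pack reachable objects as usual and collect unreachable objects into a\n# separate cruft pack with an accompanying .mtimes file. Unreachable objects\n# that were last accessed before the expiration date are dropped entirely.\n $ git repack --cruft --cruft-expiration=2.weeks.ago -d\n```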
\n\nIn Gitaly, we [started to make use of cruft packs](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/5454) in GitLab 15.10 and made the feature generally available in GitLab 15.11. Cruft packs allow us to store objects that are pending deletion more efficiently and with less impact on the overall performance of the repository.\n\n### More efficient incremental repacks\n\nCruft packs also let us fix the issue that we had to do reachability checks when doing incremental repacks.\n\nPreviously, we always had to ensure reachability when packing loose objects so that we wouldn't pack objects that are pending deletion. But now that any such object is stored as part of a cruft pack and not as a loose object anymore, we can instead compress all loose objects into a packfile. This change was [introduced into Gitaly](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/5660) with GitLab 16.0.\n\nIn an artificial benchmark with the Linux repository, compressing all loose objects into a packfile led to more than a 90-fold speedup, dropping from almost 13 seconds to 174 milliseconds.\n\n### Geometric repacking\n\nLast but not least, we still have the issue that we need to perform regular all-into-one repacks when we have too many packfiles in the repository.\n\nGit v2.32.0 introduced a new \"geometric\" repacking strategy for the [git-repack(1)](https://www.git-scm.com/docs/git-repack) command that merges multiple packfiles into a single larger packfile, which we can use to solve this issue.\n\nThis new \"geometric\" strategy tries to ensure that existing packfiles in the repository form a [geometric sequence](https://en.wikipedia.org/wiki/Geometric_progression) where each successive packfile contains at least `n` times as many objects as the preceding packfile. If the sequence isn't maintained, Git will determine a slice of packfiles that it must repack to restore the sequence. With this process, we can limit the number of packfiles that exist in the repository without having to repack all objects into a single packfile regularly.\n\nThe following figures demonstrate geometric repacking with a factor of two.\n\n1. We notice that the two smallest packfiles do not form a geometric sequence as they both contain two objects each.\n\n![Geometrically repacking packfiles, initial](https://about.gitlab.com/images/blogimages/2023-10-09-repository-scaling-odb-maintenance/geometric-repacking-1.png)\n\n2. We identify the smallest slice of packfiles that need to be repacked in order to restore the geometric sequence. Merging the smallest two packfiles would lead to a packfile with four objects. This would not be sufficient to restore the geometric sequence as the next-biggest packfile contains four objects, as well. Instead, we need to merge the smallest three packfiles into a new packfile that contains eight objects in total. As `8 × 2 ≤ 16`, the geometric sequence is restored.\n\n![Geometrically repacking packfiles, combining](https://about.gitlab.com/images/blogimages/2023-10-09-repository-scaling-odb-maintenance/geometric-repacking-2.png)\n\n3. We merge those packfiles into a new packfile.\n\n![Geometrically repacking packfiles, final](https://about.gitlab.com/images/blogimages/2023-10-09-repository-scaling-odb-maintenance/geometric-repacking-3.png)\n\nOriginally, we introduced this new feature [into Gitaly in GitLab 15.11](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/5590).\n\nUnfortunately, we had to quickly revert this new mode. It turned out that the geometric strategy was not ready to handle Git repositories that had an alternate object database connected to them. Because we make use of alternate object databases to [deduplicate objects across forks](https://docs.gitlab.com/ee/development/git_object_deduplication.html), the new repacking strategy led to problems.\n\nAs active contributors to the Git project, we set out to fix these limitations in git-repack(1) itself. This led to an [upstream patch series](http://public-inbox.org/git/a07ed50feeec4bfc3e9736bf493b9876896bcdd2.1680606445.git.ps@pks.im/T/#u) that fixed a number of limitations around alternate object directories when doing geometric repacks; these fixes were released with Git v2.41.\n\nWith these fixes upstream, we were then able to [reintroduce the change](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/5607) and [globally enable our new geometric repacking strategy](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/5745) with GitLab 16.0.
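\n\nIf you want to see the strategy in action on a local repository, you can invoke it directly (a minimal sketch; requires Git v2.32.0 or newer):\n\n```shell\n# Merge just enough of the smallest packfiles so that each remaining packfile\n# contains at least twice as many objects as the next-smaller one, and delete\n# the packs that became redundant (-d).\n $ git repack --geometric=2 -d\n```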
\n\n## Real-world results\n\nAll of this is kind of dry and deeply technical. What about the real-world results?\n\nThe following graph shows the global time we spent repacking objects across all projects hosted on GitLab.com.\n\n![Time spent optimizing repositories globally](https://about.gitlab.com/images/blogimages/2023-10-09-repository-scaling-odb-maintenance/global-optimization.png)\n\nThe initial rollout started on April 26 and progressed until April 28. As you can see, there was first a significant increase in repacking time. But after the initial dust settled, the time we spent repacking repositories globally decreased by almost 20%.\n\nIn the two weeks before we enabled the feature, during weekdays and at peak times, we usually spent around 2.6 days of accumulated repacking time per 12 hours. In the two weeks after the feature was enabled, we spent around 2.12 days per 12 hours repacking objects.\n\nThis is a success by itself already, but the more important question is how it would impact large monorepos, which are significantly harder to keep well-maintained due to their sheer size. Fortunately, the effect of the new housekeeping strategy was a lot more significant here. The following graph shows the time we spent performing housekeeping tasks in our own `gitlab-org` and `gitlab-com` groups, which host some of the most active repositories that have caused issues in the past:\n\n![Time spent optimizing repositories in GitLab groups](https://about.gitlab.com/images/blogimages/2023-10-09-repository-scaling-odb-maintenance/gitlab-groups-optimization.png)\n\nIn summary, we have observed the following improvements:\n\n|                                                         | Before              | After                | Change |\n| ------------------------------------------------------ | ------------------- | -------------------- | ------ |\n| Global accumulated repacking time                       | ~5.2 hours/hour     | ~4.2 hours/hour      | -20%   |\n| Large repositories of gitlab-org and gitlab-com groups  | ~0.7-1.0 hours/hour | 0.12-0.15 hours/hour | -80%   |\n\nWe have heard from other customers who saw similar improvements in highly active, large monorepos.\n\n## Manually enable geometric repacking\n\nWhile the new geometric repacking strategy has been enabled by default starting with GitLab 16.0, it was already introduced with GitLab 15.11. If you want to use the new geometric repacking mode there, you can opt in by setting the `gitaly_geometric_repacking` feature flag. You can do so via the `gitlab-rails` console:\n\n```ruby\nFeature.enable(:gitaly_geometric_repacking)\n```\n",[702,1286,704,703],"production",{"slug":1288,"featured":6,"template":678},"rearchitecting-git-object-database-mainentance-for-scale","content:en-us:blog:rearchitecting-git-object-database-mainentance-for-scale.yml","Rearchitecting Git Object Database Mainentance For Scale","en-us/blog/rearchitecting-git-object-database-mainentance-for-scale.yml","en-us/blog/rearchitecting-git-object-database-mainentance-for-scale",{"_path":1294,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1295,"content":1301,"config":1308,"_id":1310,"_type":16,"title":1311,"_source":17,"_file":1312,"_stem":1313,"_extension":20},"/en-us/blog/access-token-lifetime-limits",{"title":1296,"description":1297,"ogTitle":1296,"ogDescription":1297,"noIndex":6,"ogImage":1298,"ogUrl":1299,"ogSiteName":692,"ogType":693,"canonicalUrls":1299,"schema":1300},"Why GitLab access tokens now have lifetime limits","Pre-existing and new personal, group, or project access tokens now have enforced lifetime limits. Find out why and learn how to minimize disruption.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662877/Blog/Hero%20Images/security-cover-new.png","https://about.gitlab.com/blog/access-token-lifetime-limits","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab access tokens now have lifetime limits\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hannah Sutor\"}],\n        \"datePublished\": \"2023-10-25\",\n      }",{"title":1296,"description":1297,"authors":1302,"heroImage":1298,"date":1304,"body":1305,"category":14,"tags":1306},[1303],"Hannah Sutor","2023-10-25","***Update July 2024 - For self-managed customers only: There have been modifications to the upgrade path since this blog post was originally published. Users upgrading after July 24, 2024, from a pre-16.0 version of GitLab to the latest patch release of Version 16.0 or later will not have an expiration date set for tokens that didn't have one previously.***
\n\n***Update May 2024 - The removal of support for non-expiring access tokens was first announced in [September 2022](https://docs.gitlab.com/ee/update/deprecations.html#non-expiring-access-tokens), enacted in GitLab 16.0 (May 2023), as noted in this article, and went into effect for GitLab.com customers on May 14, 2024. If you recently started seeing a large number of 401s or authentication issues on your API calls, this may be due to expired access tokens. For GitLab self-managed customers, tokens without an expiration date will be set to expire one year from the date of upgrading your instance to GitLab 16.0. If you encounter this situation, please see the guidance in our documentation on [how to identify and extend affected tokens](https://docs.gitlab.com/ee/security/token_overview.html#troubleshooting).***\n\nBalance security and ease of use. It sounds so simple, right? Anyone who has ever implemented security controls knows that this balance is a delicate one, and one that may never be fully achieved, since people may have different tolerance levels.\n\nAt GitLab, we are no exception. In the [Authentication group](https://about.gitlab.com/direction/govern/authentication/), we try to provide a toolbox of access and security controls that GitLab administrators can implement to their liking, recognizing that everyone sits at a different place on the security vs. accessibility spectrum. There are times, however, when we have to make decisions about which access mechanisms we offer to our customers, including those related to powerful, long-lived credentials and their lifecycles. These credentials can often be created and left unchanged for years, with potential exposure in logs and configurations, and to the people working with those tools. If leaked, they can cause irreparable harm to an organization's security posture.\n\n## Our decision to remove support for non-expiring access tokens\n\nIn GitLab 16.0, we made the decision to remove support for non-expiring access tokens. This was first announced in 15.4 — you can read the [removal announcement here](https://docs.gitlab.com/ee/update/deprecations.html#non-expiring-access-tokens). As of the 16.0 milestone (May 2023), we applied an expiration date of May 14, 2024, to any personal, group, or project access token that previously didn't have one. Any access token that _already had_ an expiration, even if it was outside of the 365-day limit, was left untouched.\n\n**Starting on May 15, 2023, any new access token created must have an expiration within 365 days of creation.**\n\nIn GitLab Ultimate, administrators have the [ability to set a custom allowable limit](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#when-personal-access-tokens-expire) for token expiration. This policy allows administrators to set a lifetime of less than 365 days for compliance purposes. In Premium and Free tiers, tokens must be set to expire within 365 days.\n\n## What is the impact?\n\nIf you have automation that relies on a personal, group, or project access token, and you don't modify its expiration date, it will stop working when it hits the expiration date. If you previously did not set an expiration date for your tokens, one has now been set for you, no earlier than May 14, 2024. Unless you extend the token lifetime and/or rotate the token, your automation will stop working on that day.
\n\nWe recognize that this may be a disruptive change. This article is meant to raise awareness for our customers in advance of May 14, 2024.\n\n## Why are we making this change?\n\nIt all started with an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/348660) suggested by our internal application security team, which led us to populate some security-conscious defaults for access tokens: the least amount of privilege by default and a 30-day expiry date. Users could always change them if they wanted to.\n\nWe had already [enforced an expiry date for OAuth tokens](https://about.gitlab.com/blog/gitlab-releases-15-breaking-changes/#oauth-tokens-without-an-expiration) in GitLab 15.0. Our application security team recommended that we enforce an expiration date for personal, project, and group access tokens as well. Long-lived, static secrets should have enforced lifetime limits as a best security practice. Hence the need to put these limits in place. If a token didn't have an expiration date, we placed a one-year expiration on the token as of our 16.0 release in May 2023. This means that tokens will expire in May 2024 if they are not rotated and/or given a modified expiration date beforehand.\n\n## How to minimize the impact\n\nYou're reading this blog post now, so hopefully you're ahead of the potential impacts that a change like this can cause. The sections below detail how you can keep GitLab running smoothly.\n\n### Know what you have\n\nBe proactive. Start by doing an audit of all of your tokens. If you're an Ultimate customer, you can use the [credentials inventory](https://docs.gitlab.com/ee/administration/credentials_inventory.html) (available in self-managed only) to see all personal, project, and group access tokens in your instance.\n\nIf you don't have access to the credentials inventory, you can:\n- [View the active personal tokens](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#view-the-last-time-a-token-was-used) under **Access tokens** from the left navigation.\n- List [personal](https://docs.gitlab.com/ee/api/personal_access_tokens.html#list-personal-access-tokens), [project](https://docs.gitlab.com/ee/api/project_access_tokens.html#list-project-access-tokens), or [group](https://docs.gitlab.com/ee/api/group_access_tokens.html) access tokens using the API. Administrators can query tokens created by all users, while individual users can view only tokens created by themselves.\n\nIf you're a GitLab administrator, communicate with your end users about this change coming to their personal access tokens, and how you would like them to manage expiration in the future. You can link them to this blog post.\n\n### Use the rotation API\n\nWe released a [token rotation API](https://docs.gitlab.com/ee/api/personal_access_tokens.html#rotate-a-personal-access-token) that revokes the previous token and creates a new token that expires in one week.\n\nWe also implemented [automatic token reuse detection](https://docs.gitlab.com/ee/api/personal_access_tokens.html#automatic-reuse-detection) for increased security. Automatic reuse detection is a defense-in-depth security measure that helps prevent an attacker who holds a leaked access token from using the token rotation API to maintain indefinite access to a user's account by repeatedly rotating the leaked token into new ones.
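\n\nFor illustration, rotating a token boils down to a single API call (a sketch against GitLab.com; the token ID and the `PRIVATE-TOKEN` value are placeholders):\n\n```shell\n# Revoke the old token and receive a new one that expires in one week.\ncurl --request POST \\\n  --header \"PRIVATE-TOKEN: \u003Cyour_access_token>\" \\\n  \"https://gitlab.com/api/v4/personal_access_tokens/\u003Ctoken_id>/rotate\"\n```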
\n\nTo briefly describe how automatic token reuse detection works, let's consider a scenario where a legitimate user has accidentally disclosed their personal access token (AT1) publicly. An attacker stumbles on this leaked access token (AT1) and uses AT1 with the token rotation endpoint to get a new access token (AT2), maintaining access to the user's account. The legitimate user, unaware of the AT1 leak or the attacker's access, tries to use AT1 and the token rotation API to get a new access token (AT3, in their mind) for themselves. However, since AT1 is being used on the token rotation endpoint twice, the backend detects this reuse and infers that it could be due to a token leak. Because it has no way of knowing whether it is the attacker or the legitimate user making the request, in the interest of securing access to the user's account, the latest active token in the token family, AT2, is revoked, thus cutting off the attacker's access to the user's account.\n\nAs a consequence of reuse detection, token rotation must be executed with attention to potential concurrency issues. It is recommended not to call the token rotation API multiple times with the same access token. Otherwise, automatic reuse detection may immediately revoke the entire token family, as a security measure, as described above.\n\n### Manually set an expiration date\n\nYou can use the UI to delete an existing access token and create a new one with a designated expiration date. Make sure you swap the new token into your automation. Expiration dates of existing tokens cannot be modified in the UI, so if you want to set an expiration date that is further out, you'll need to generate a new token.\n\n### Watch your notifications\n\nOur team has implemented email notifications for expiring personal, group, and project access tokens. These notifications are structured as follows:\n\n- You get an email notification when your token is 7 days from expiry.\n- Another email is sent one day before expiry.\n- Each individual token triggers its own separate email.\n\nGroup owners and project maintainers will receive expiration notifications for group access tokens and project access tokens, respectively. For personal access tokens, individual users will get the email.\n\n## Service accounts for automation use cases\n\nFor automation use cases that currently use group or project access tokens, we suggest that you look into [service accounts](https://docs.gitlab.com/ee/user/profile/service_accounts.html), available on GitLab Premium and Ultimate tiers. These accounts do not use a licensed seat and are not able to access the GitLab UI using the interactive login. They also have a distinct membership type, making them simple to track. Combined with _optional_ token lifetime limits ([coming soon](https://gitlab.com/gitlab-org/gitlab/-/issues/421420)), this means you could set them to never expire (although we encourage you to still be mindful of security best practices).\n\n## What's next\n\nThe next step is for you to share this information with your teams and determine how this change impacts your own environment.\n
Please follow the links we've provided throughout the blog to make the necessary changes to your project, group, and personal access tokens.\n",[675,1307],"security",{"slug":1309,"featured":6,"template":678},"access-token-lifetime-limits","content:en-us:blog:access-token-lifetime-limits.yml","Access Token Lifetime Limits","en-us/blog/access-token-lifetime-limits.yml","en-us/blog/access-token-lifetime-limits",{"_path":1315,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1316,"content":1322,"config":1329,"_id":1331,"_type":16,"title":1332,"_source":17,"_file":1333,"_stem":1334,"_extension":20},"/en-us/blog/microcks-and-gitlab-part-one",{"title":1317,"description":1318,"ogTitle":1317,"ogDescription":1318,"noIndex":6,"ogImage":1319,"ogUrl":1320,"ogSiteName":692,"ogType":693,"canonicalUrls":1320,"schema":1321},"Speed up API and microservices delivery with Microcks and GitLab - Part 1","Learn how to configure Microcks for GitLab and what the use cases are for this open source Kubernetes-native tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683021/Blog/Hero%20Images/lightsticks.png","https://about.gitlab.com/blog/microcks-and-gitlab-part-one","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up API and microservices delivery with Microcks and GitLab - Part 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2023-09-27\",\n      }",{"title":1317,"description":1318,"authors":1323,"heroImage":1319,"date":1325,"body":1326,"category":14,"tags":1327},[1324],"Madou Coulibaly","2023-09-27","\n\nAPI development is all the rage these days for customer and partner integration, frontend-to-backend communication, microservices orchestration, and more. Yet APIs have their challenges, including how to create a fast feedback loop on design, how different teams can work with autonomy without having to wait for each other's API implementation, and how to cope with backward compatibility tests when shipping newer versions of the API. \n\n[Microcks](https://microcks.io), an open source, Kubernetes-native tool for API mocking and testing, addresses these challenges. With Microcks, which is accepted as a Sandbox project in the [Cloud Native Computing Foundation](https://cncf.io), developers can leverage their [OpenAPI](https://www.openapis.org/), [GraphQL](https://graphql.org/), [gRPC](https://grpc.io/), [AsyncAPI](https://www.asyncapi.com/), and [Postman Collection](https://www.postman.com/collection/) assets to quickly mock and simulate APIs before writing them. Couple Microcks with GitLab and you have a powerful combination to foster collaboration, encourage rapid changes, and provide a robust delivery platform for API-based applications.\n\nIn this ongoing blog series, we will introduce you to Microcks use cases and how they fit with the GitLab platform. We'll also discuss technical integration points that will help ease the developer burden, including identity management, Git repositories, and pipeline integrations.\n\n## What is Microcks?\nMicrocks addresses two major use cases: \n- **Simulating (or mocking) an API or a microservice** from a set of descriptive assets. 
This can be done as soon as you start the design phase to set up a feedback loop very quickly, or later on to ease the pain of provisioning environments with a lot of dependencies.\n- **Validating the conformance of your application regarding your API specification** by running contract tests. This validation can be integrated into your CI/CD pipeline so that conformance can be checked on each and every iteration. This is of great help in enforcing backward compatibility of your API or microservice interfaces.\n\nMicrocks offers a uniform and consistent approach for the various kinds of request/response APIs (REST, GraphQL, gRPC, SOAP) and event-driven APIs (currently supporting eight different protocols), thereby bringing consistency for users and for automations all along your API lifecycle.\n\n## How Microcks fits into the software development lifecycle\nMicrocks is a solution based on containers and can be deployed in several configurations. It can be deployed on the developer laptop through [Docker](https://microcks.io/documentation/installing/docker-compose/), [Podman](https://microcks.io/documentation/installing/podman-compose/) or [Docker Desktop Extension](https://microcks.io/documentation/installing/docker-desktop-extension/) to assist with mocking complex environments. When it comes to team collaboration, Microcks can be deployed as a centralized instance that connects to the Git repositories of the organization, discovers the API artifacts, and then provides shared, up-to-date API simulations.\n\n![diagram of how Microcks fits into development lifecycle](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks.png){: .shadow.small.center}\n\nTo ease the burden on developers (and administrators), Microcks can be configured to use your GitLab platform as an identity provider. With that configuration, integrating Microcks is seamless, and API simulations are automatically shared among development teams. Microcks fosters collaboration by providing everyone with the same “source of truth” and avoiding drift risks. The tool can also be used to lower the pain and the cost of deploying and maintaining complex QA environments because simulations are inexpensive to deploy or redeploy on-demand. Microcks deployment follows a GitOps approach.\n\nBeyond this sharing of simulations, Microcks also integrates well with CI/CD pipelines. As you release API-based applications, there is always concern about conformance with the contract expectations you defined using specifications like OpenAPI, GraphQL, and the like. Usually, the hardest part isn't delivering the `1.0` of this API; problems come later when you're trying to deliver the `1.3`. This latest version must still be backward compatible with the `1.0` contract if you don't want to make your consumers angry and frustrated.\n\nThis conformance validation is very well assured by Microcks using contract-testing principles. So we encourage you to plug Microcks into some `test`-related jobs in your GitLab pipeline and delegate this conformance validation to your Microcks instance.\n\n![microcks-in-gitlab-workflow](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-in-gitlab-workflow.png){: .shadow.medium.center}\n\nEmbedding Microcks conformance testing in your pipeline is actually easy thanks to our lightweight CLI that you'll integrate in pipeline jobs.
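\n\nAs a sketch of what such a job's script could run (the instance URL, credentials, API name, and test endpoint here are all placeholders, and the exact flags may vary between microcks-cli versions):\n\n```shell\n# Ask a Microcks instance to run an OpenAPI contract test against the\n# implementation we just deployed, and wait for the verdict.\nmicrocks-cli test 'API Pastry:2.0' https://api.acme.org/pastry OPEN_API_SCHEMA \\\n  --microcksURL=https://microcks.acme.org/api \\\n  --keycloakClientId=microcks-serviceaccount \\\n  --keycloakClientSecret=$MICROCKS_CLIENT_SECRET \\\n  --waitFor=10sec\n```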
\n\nYou can choose to reuse an existing Microcks instance to record results and keep a history of your runs, or spin up a new ephemeral instance, as it's lightweight and fast to bootstrap.\n\n## How to set up GitLab as an identity provider in Microcks\n\nTo start off this series, we will detail how to configure Microcks to use your GitLab platform as an identity provider. This is in fact very easy, as authentication in Microcks is based on [Keycloak](https://keycloak.org) (another CNCF project) and GitLab can be set as an identity provider in Keycloak (see the [official documentation](https://www.keycloak.org/docs/latest/server_admin/index.html#gitlab)).\n\n**Note:** This configuration is optional, as Microcks can use any other identity provider Keycloak integrates with.\n\nKeycloak is a very common solution that may already be deployed at your organization. If not, Microcks comes with a Keycloak distribution that is pre-configured for its usage with a realm called `microcks`. We have used this realm to validate this configuration.\n\n### Create a GitLab Group Application\nThe first step is to create a new [Group Application](https://docs.gitlab.com/ee/integration/oauth_provider.html#create-a-group-owned-application) on your GitLab instance as follows:\n- `Name`: `microcks-via-keycloak`\n- `Redirect URI`: `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint`\n- `Scopes`: `read_user`, `openid`, `profile`, and `email`\n\n![gitlab-application-form](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application-form.png){: .shadow.medium.center}\n\nThis application uses your Keycloak instance with `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint` as the redirect URI. As a result, we obtain an `Application ID` and an associated `Secret` that we have to keep aside for the next step.\n\n![gitlab-application](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application.jpeg){: .shadow.medium.center}\n\n### Add GitLab as identity provider in Keycloak\nThe next step takes place in the Keycloak admin console. Once the correct `microcks` realm is selected, you'll just have to go to the **Identity providers** section and add a GitLab provider. Simply paste the `Application ID` you got earlier as the `Client ID` and the `Secret` as the `Client Secret`. You can also choose a `Display order` if you plan to have multiple identity providers.\n\n![keycloak-identity-provider](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-identity-provider.jpg){: .shadow.medium.center}\n\nThen, from the **Authentication** section in the admin console, choose the browser flow and configure the `Identity Provider Redirector` as follows:\n\n- `Alias`: `GitLab`\n- `Default Identity Provider`: `gitlab`\n\n![keycloak-redirector](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-redirector.jpg){: .shadow.medium.center}\n\n### Test your Microcks configuration\nNow open the Microcks URL in your browser and you'll be directly redirected to the GitLab login page. Enter your GitLab credentials and you will be authenticated and redirected to Microcks.
\n\n![microcks-homepage](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-homepage.jpeg){: .shadow.medium.center}\n\n## What's next?\nIn upcoming blog posts, we'll detail how GitLab can be used in the two major use cases for Microcks. We'll see how Microcks integrates with GitLab Git repositories to discover API specifications and produce simulations, and how to integrate Microcks conformance tests into your GitLab CI/CD pipelines.\n\n_[Laurent Broudoux](https://www.linkedin.com/in/laurentbroudoux/) is a cloud-native architecture expert and enterprise integration problem lover. He has helped organizations adopt distributed and cloud paradigms while capitalizing on their critical existing assets. He is the founder and lead developer of the [Microcks.io](https://microcks.io/) open-source project: a Kubernetes-native tool for API mocking and testing. He draws on his 10+ years of experience as an architect in financial services, where he defined API transformation strategies, including governance and delivery processes._\n\n_[Madou Coulibaly](https://gitlab.com/madou) is a senior solutions architect at GitLab._\n",[771,1328,110,873,232],"testing",{"slug":1330,"featured":6,"template":678},"microcks-and-gitlab-part-one","content:en-us:blog:microcks-and-gitlab-part-one.yml","Microcks And Gitlab Part One","en-us/blog/microcks-and-gitlab-part-one.yml","en-us/blog/microcks-and-gitlab-part-one",{"_path":1336,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1337,"content":1342,"config":1348,"_id":1350,"_type":16,"title":1351,"_source":17,"_file":1352,"_stem":1353,"_extension":20},"/en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira",{"title":1338,"description":1339,"ogTitle":1338,"ogDescription":1339,"noIndex":6,"ogImage":1298,"ogUrl":1340,"ogSiteName":692,"ogType":693,"canonicalUrls":1340,"schema":1341},"How to export vulnerability reports to HTML/PDF and Jira","With GitLab's API, it's easy to query vulnerability info and send the report details elsewhere, such as a PDF file or a Jira project.","https://about.gitlab.com/blog/exporting-vulnerability-reports-to-html-pdf-jira","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to export vulnerability reports to HTML/PDF and Jira\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2023-09-14\",\n      }",{"title":1338,"description":1339,"authors":1343,"heroImage":1298,"date":1344,"body":1345,"category":14,"tags":1346},[1200],"2023-09-14","\nGitLab's [Vulnerability Report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) makes it easy to triage security scan results without ever having to leave the platform. You can manage your code, run security scans against it, and fix vulnerabilities all in one place. That being said, some teams prefer to manage their vulnerabilities in a separate tool like Jira. They may also need to present the vulnerability report to leadership in a digestible format.\n\nOut of the box, GitLab's Vulnerability Report can be [exported to CSV](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/#export-vulnerability-details) with a single click, for easy analysis in other tools. In some cases though, a simple PDF of the report is all that's needed.
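\n\nEverything in this post builds on the fact that GitLab also exposes the vulnerability report through its GraphQL API. As a quick, hypothetical taste (swap in your own token and project path):\n\n```shell\n# Fetch the titles, severities, and states of a project's first few vulnerabilities.\ncurl --request POST \\\n  --header \"PRIVATE-TOKEN: \u003Cyour_access_token>\" \\\n  --header \"Content-Type: application/json\" \\\n  --data '{\"query\": \"{ project(fullPath: \\\"smathur/custom-vulnerability-reporting\\\") { vulnerabilities(first: 5) { nodes { title severity state } } } }\"}' \\\n  \"https://gitlab.com/api/graphql\"\n```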
\n\nWith [GitLab's API](https://docs.gitlab.com/ee/api/graphql/reference/index.html#queryvulnerabilities), it's easy to query vulnerability info and send the report details elsewhere, such as a PDF file or a Jira project. In this blog, we'll show you how to export to HTML/PDF and Jira. **Note that the scripts used in this tutorial are provided for educational purposes and they are not supported by GitLab.**\n\n## Exporting to HTML/PDF\nTo export your vulnerability reports to HTML or PDF, head to the [Custom Vulnerability Reporting](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting) project. \n\n![Project overview](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/project_overview.png)\n\n\nThis project contains a script that queries a project's vulnerability report, and then generates an HTML file from that data. The pipeline configured in the project runs this script and converts the HTML file to PDF as well.\n\nTo use the exporter, first [fork the project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting/-/forks/new) or [import it into a new project](https://gitlab.com/projects/new#import_project) (select “Repository by URL” and paste the git URL of the original project).\n\n![Project import](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/project_import.png)\n\n\nSet the CI/CD variables as described in the readme. You'll need the following from GitLab:\n- GitLab project/personal access token with permissions to access vulnerability info (read_api scope)\n- GitLab GraphQL API URL (for SaaS this is https://gitlab.com/api/graphql)\n- GitLab project path (e.g. smathur/custom-vulnerability-reporting)\n\nAfter you've set the required CI/CD variables, manually run a pipeline from your project's Pipelines page. Once the pipeline is complete, you'll see your file export by going to the “build_report” (for HTML) or “pdf_conversion” job and selecting “Download” or “Browse” on the sidebar under \"Job artifacts.\" And there you have it! A shareable, easy-to-read export of your project's vulnerabilities.\n\n![PDF export](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/pdf_export.png)\n\n\n## Exporting vulnerability info to Jira\nGitLab lets you create Jira tickets from vulnerabilities through the UI using our [Jira integration](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#create-a-jira-issue-for-a-vulnerability). While you can do this individually for vulnerabilities that need actioning, sometimes teams need to bulk-create Jira tickets for all their vulnerabilities. We can leverage GitLab and Jira's APIs to achieve this.\n\nTo get started, head to the [External Vulnerability Tracking](https://gitlab.com/smathur/external-vulnerability-tracking) project. This script fetches vulnerabilities in the same way as the script above, but it uses the Jira API to create a ticket for each vulnerability. Each ticket's description is also populated with details from GitLab's vulnerability report.\n\nTo use the exporter, simply [fork the project](https://gitlab.com/smathur/external-vulnerability-tracking/-/forks/new) or [import it into a new project](https://gitlab.com/projects/new#import_project) (select “Repository by URL” and paste the git URL of the original project), and set the CI/CD variables as described in the readme. 
You'll need the following from GitLab:\n- GitLab project/personal access token with permissions to access vulnerability info (read_api scope)\n- GitLab GraphQL API URL (for SaaS this is https://gitlab.com/api/graphql)\n- GitLab project path (e.g. smathur/external-vulnerability-tracking)\n\nYou will also need the following from Jira:\n- Jira [personal access token](https://id.atlassian.com/manage-profile/security/api-tokens)\n- Jira API issue endpoint URL (for SaaS this is https://ORG_NAME.atlassian.net/rest/api/latest/issue/)\n- Jira user email ID\n- Jira project key where you want to create vulnerability tickets (e.g. ABC)\n\nOnce you have set your CI/CD variables as described in the project readme, simply run a pipeline from your project's Pipelines page, and watch as your tickets get created in Jira!\n\nIf you run the pipeline again in the future, the script will run a search query against your Jira project to prevent duplicate tickets from being created. It will create tickets for new vulnerabilities that aren't already in Jira.\n\n![Jira export](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/jira_export.png)\n\n\n## References\n- [GitLab Vulnerability API](https://docs.gitlab.com/ee/api/graphql/reference/index.html#queryvulnerabilities)\n- [Custom Vulnerability Reporting project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting)\n- [External Vulnerability Tracking project](https://gitlab.com/smathur/external-vulnerability-tracking)\n- [Jira REST API examples](https://developer.atlassian.com/server/jira/platform/jira-rest-api-examples/)\n\n",[726,1347,1307,771],"collaboration",{"slug":1349,"featured":6,"template":678},"exporting-vulnerability-reports-to-html-pdf-jira","content:en-us:blog:exporting-vulnerability-reports-to-html-pdf-jira.yml","Exporting Vulnerability Reports To Html Pdf Jira","en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira.yml","en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira",{"_path":1355,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1356,"content":1362,"config":1367,"_id":1369,"_type":16,"title":1370,"_source":17,"_file":1371,"_stem":1372,"_extension":20},"/en-us/blog/hosting-vuejs-apps-using-gitlab-pages",{"title":1357,"description":1358,"ogTitle":1357,"ogDescription":1358,"noIndex":6,"ogImage":1359,"ogUrl":1360,"ogSiteName":692,"ogType":693,"canonicalUrls":1360,"schema":1361},"How to host VueJS apps using GitLab Pages","Follow this tutorial, including detailed configuration guidance, to quickly get your application up and running for free.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683489/Blog/Hero%20Images/hosting.png","https://about.gitlab.com/blog/hosting-vuejs-apps-using-gitlab-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to host VueJS apps using GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sophia Manicor\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2023-09-13\",\n      }",{"title":1357,"description":1358,"authors":1363,"heroImage":1359,"date":1364,"body":1365,"category":14,"tags":1366},[849,789],"2023-09-13","\nIf you use VueJS to build websites, then you can host your website for free with GitLab Pages. 
This short tutorial walks you through a simple way to host and deploy your VueJS applications using GitLab CI/CD and GitLab Pages.\n\n## Prerequisites\n- A VueJS application\n- Working knowledge of GitLab CI\n- 5 minutes\n\n## Setting up your VueJS application\n\n1) Install vue-cli.\n\n```bash\nnpm install -g @vue/cli\n# OR\nyarn global add @vue/cli\n```\nYou can check that you have the right version of Vue with:\n\n```bash\nvue --version\n```\n\n2) Create your application using:\n\n```bash\nvue create name-of-app\n```\n\nWhen successfully completed, you will have a scaffolding of your VueJS application.\n\n## Setting up .gitlab-ci.yml for GitLab Pages\nBelow is the [GitLab CI configuration](https://gitlab.com/demos/applications/vuejs-gitlab-pages/-/blob/main/.gitlab-ci.yml) necessary to deploy to GitLab Pages. Put this file in the root of your project. GitLab Pages always deploys your website from a specific folder called `public`.\n\n```yaml\nimage: \"node:16-alpine\"\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - yarn install --frozen-lockfile --check-files --non-interactive\n    - yarn build\n  artifacts:\n    paths:\n      - public\n\npages:\n  stage: deploy\n  script:\n    - echo 'Pages deployment job'\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n```\n\n## Vue config (vue.config.js)\nIn Vue, the build artifacts are written to a folder called `dist`. In order for GitLab to deploy to Pages, we need to change the path of the artifacts. One way to do this is by changing the [Vue config file](https://gitlab.com/demos/applications/vuejs-gitlab-pages/-/blob/main/vue.config.js), `vue.config.js`.\n\n```javascript\nconst { defineConfig } = require('@vue/cli-service')\n\nfunction publicPath () {\n  if (process.env.CI_PAGES_URL) {\n    return new URL(process.env.CI_PAGES_URL).pathname\n  } else {\n    return '/'\n  }\n}\n\nmodule.exports = defineConfig({\n  transpileDependencies: true,\n  publicPath: publicPath(),\n  outputDir: 'public'\n})\n```\n\nHere we have set `outputDir` to `public` so that GitLab will pick up the build artifacts and deploy to Pages. Another important piece when creating this configuration file is to change the `publicPath`, which is the base URL your application will be deployed at. In this case, we have created a function `publicPath()` that checks if the `CI_PAGES_URL` environment variable is set and returns the correct base URL. For example, if `CI_PAGES_URL` is `https://mygroup.gitlab.io/name-of-app`, the function returns `/name-of-app`.\n\n## Run GitLab CI\n\n![vuejs-gitlab-pages-pipeline](https://about.gitlab.com/images/blogimages/2023-05-11-hosting-vuejs-apps-using-gitlab-pages/vuejs-gitlab-pages-pipeline.png){: .shadow}\n\n## Check Pages to get your URL\n\n![gitlab-pages-domain](https://about.gitlab.com/images/blogimages/2023-05-11-hosting-vuejs-apps-using-gitlab-pages/gitlab-page-domain.png){: .shadow}\n\nVoila! You have set up a VueJS project with a fully functioning CI/CD pipeline.\n
Enjoy your VueJS application hosted by GitLab Pages!\n\n## References\n- [https://cli.vuejs.org/guide/installation.html](https://cli.vuejs.org/guide/installation.html)\n- [https://cli.vuejs.org/guide/creating-a-project.html](https://cli.vuejs.org/guide/creating-a-project.html)\n- [https://gitlab.com/demos/applications/vuejs-gitlab-pages](https://gitlab.com/demos/applications/vuejs-gitlab-pages)\n\n",[110,726,832,937],{"slug":1368,"featured":6,"template":678},"hosting-vuejs-apps-using-gitlab-pages","content:en-us:blog:hosting-vuejs-apps-using-gitlab-pages.yml","Hosting Vuejs Apps Using Gitlab Pages","en-us/blog/hosting-vuejs-apps-using-gitlab-pages.yml","en-us/blog/hosting-vuejs-apps-using-gitlab-pages",{"_path":1374,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1375,"content":1381,"config":1386,"_id":1388,"_type":16,"title":1389,"_source":17,"_file":1390,"_stem":1391,"_extension":20},"/en-us/blog/cascading-merge-requests-with-gitlab-flow",{"title":1376,"description":1377,"ogTitle":1376,"ogDescription":1377,"noIndex":6,"ogImage":1378,"ogUrl":1379,"ogSiteName":692,"ogType":693,"canonicalUrls":1379,"schema":1380},"How to adopt a cascading merge request strategy with GitLab Flow","This tutorial explains how to consolidate updates in a single branch and propagate them to other branches using ucascade bot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679851/Blog/Hero%20Images/cascade.jpg","https://about.gitlab.com/blog/cascading-merge-requests-with-gitlab-flow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to adopt a cascading merge request strategy with GitLab Flow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2023-08-31\",\n      }",{"title":1376,"description":1377,"authors":1382,"heroImage":1378,"date":1383,"body":1384,"category":14,"tags":1385},[1324],"2023-08-31","\nGit offers a range of branching strategies and workflows that can be utilized to enhance organization, efficiency, and code quality. Employing a well-defined workflow helps foster a successful and streamlined development process. By implementing the [release branches using GitLab Flow](https://docs.gitlab.com/ee/topics/gitlab_flow.html#release-branches-with-gitlab-flow), you can effectively handle multiple product releases. However, when it comes to fixing bugs, it often becomes necessary to apply the fix across various stable branches such as `main`,  `stable-1.0`, `stable-1.1`, and `stable-2.0`. The process of applying the fix to multiple locations can be time-consuming, as it involves the manual creation of multiple merge requests.\n\nBy consolidating updates in a single branch and propagating them to other branches, the cascading merge approach establishes a central source of truth, reducing confusion and maintaining consistency. 
In this blog post, we will guide you through setting up this approach for your GitLab project using the [ucascade bot](https://github.com/unblu/ucascade).\n\n## Getting started\nTo get started, you'll need the following prerequisites:\n\n### Environment\n  - a GitLab project that implements the [release branches strategy](https://docs.gitlab.com/ee/topics/gitlab_flow.html#release-branches-with-gitlab-flow)\n  - a Kubernetes cluster\n\n### CLI\n  - git\n  - kubectl\n  - docker\n\n### Project access tokens\nFollow the instructions on the [Project access tokens page](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token) to create two project access tokens – `ucascade` and `ucascade-approver` – with the API scope in your GitLab project.\n\n![project access tokens](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/pat.png){: .shadow.medium}\n\n## Deploy ucascade bot on Kubernetes\nFirst, create the `bots-fleet` namespace on Kubernetes.\n\n```\nkubectl create namespace bots-fleet\n```\n\nThen, create the `cascading-merge-secret` secret that contains the GitLab project access tokens created previously.\n\n```\nkubectl create secret generic cascading-merge-secret -n bots-fleet \\\n--from-literal=gitlab-host=https://gitlab.com \\\n--from-literal=gitlab-api-token=\u003CUCASCADE_PROJECT_ACCESS_TOKEN> \\\n--from-literal=gitlab-api-token-approver=\u003CAPPROVER_BOT_PROJECT_ACCESS_TOKEN>\n```\n\nOnce done, (fork and) clone the [Cascading Merge repository](https://gitlab.com/madou-stories/bots-fleet/cascading-merge) that contains the Kubernetes manifests for the bot, and replace the `host` field in the `kube/ingress.yaml` file according to your Kubernetes domain.\n\n```yaml\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    kubernetes.io/ingress.class: nginx\n  name: ucascade\n  namespace: bots-fleet\nspec:\n  rules:\n  - host: ucascade.\u003CKUBERNETES_BASED_DOMAIN>\n    http:\n      paths:\n      - backend:\n          service:\n            name: ucascade\n            port:\n              number: 80\n        path: /\n        pathType: Prefix\n```\n\nNow, you are ready to deploy the `ucascade` bot.\n\n```\nkubectl apply -f kube/\n```\n\nYou should see the following resources deployed on Kubernetes:\n\n![ucascade-k8s](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/ucascade-k8s.png){: .shadow.medium}\n\n**Note:** The `ucascade` image is based on the [ucascade-bot](https://github.com/unblu/ucascade-bot) and is located in the [Container Registry](https://gitlab.com/madou-stories/bots-fleet/cascading-merge/container_registry) of the Cascading Merge repository.\n{: .note}\n\n## Create a GitLab webhook\nFollow the instructions on [the Webhooks page](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#configure-a-webhook-in-gitlab) to create a webhook with the following variables:\n  - **URL**: `\u003CUCASCADE_INGRESS_URL>/ucascade/merge-request`\n  - **Trigger**: `Merge request events`\n\n![webhook](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/webhook.png){: .shadow.medium}\n\n## Configure your Cascading Merge rule\nCreate a file called `ucascade.json` at the root level of your GitLab project, as defined in the [configuration file](https://unblu.github.io/ucascade/tech-docs/11_ucascade-configuration-file.html#_configuration_file) documentation, and matched with your release definition.
\n\n![configuration](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/configuration.png){: .shadow.medium}\n\n## Testing the Cascading Merge\nNow create a branch and an MR from your default branch, make a change, and merge it. The `ucascade` bot will propagate the change to all other release branches by automatically creating cascading MRs. The following video demonstrates the process:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ej7xf8axWMs\" title=\"Cascading Merge Approach\"\n  frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Additional resources\nFind more information about the `ucascade` bot in the [ucascade documentation](https://unblu.github.io/ucascade/index.html).\n\n_A special thank you to Jérémie Bresson for authoring and open sourcing this amazing bot!_\n",[110,1084,702,726],{"slug":1387,"featured":92,"template":678},"cascading-merge-requests-with-gitlab-flow","content:en-us:blog:cascading-merge-requests-with-gitlab-flow.yml","Cascading Merge Requests With Gitlab Flow","en-us/blog/cascading-merge-requests-with-gitlab-flow.yml","en-us/blog/cascading-merge-requests-with-gitlab-flow",{"_path":1393,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1394,"content":1400,"config":1406,"_id":1408,"_type":16,"title":1409,"_source":17,"_file":1410,"_stem":1411,"_extension":20},"/en-us/blog/remote-design-sprints",{"title":1395,"description":1396,"ogTitle":1395,"ogDescription":1396,"noIndex":6,"ogImage":1397,"ogUrl":1398,"ogSiteName":692,"ogType":693,"canonicalUrls":1398,"schema":1399},"How to facilitate remote design sprints","Use these tips to help solve big design problems with stakeholders across multiple time zones.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683129/Blog/Hero%20Images/remotedesign.png","https://about.gitlab.com/blog/remote-design-sprints","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to facilitate remote design sprints\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily Bauman\"}],\n        \"datePublished\": \"2023-08-23\",\n      }",{"title":1395,"description":1396,"authors":1401,"heroImage":1397,"date":1403,"body":1404,"category":14,"tags":1405},[1402],"Emily Bauman","2023-08-23","Recently, our research showed that our [Environments feature](https://about.gitlab.com/handbook/engineering/development/ops/deploy/environments/), which is part of the [Deploy stage](https://about.gitlab.com/direction/#devsecops-stages) of the software development lifecycle, was experiencing lower adoption rates and facing some usability challenges. Leaning on the evidence, [Viktor Nagy](https://gitlab.com/nagyv-gitlab), product manager for Environments, and I soon realized that we needed to look beyond a few small fixes and rethink our direction. We needed a design sprint. Below we share the process for creating your own remote design sprint.\n\n## What is a design sprint?\nDesign sprint is a term most people working in tech have heard in passing, but the meaning and purpose behind running one are often lost. A design sprint is a process for solving big problems through design, prototyping, and assessing ideas with customers. It's a method for developing a hypothesis, prototyping an idea, and testing it rapidly with as little investment as possible. Essentially, it's a great tool to align a team under a common goal and answer the question: Are we on the right track to making a product that users will want to use?
Essentially, it's a great tool to align a team under a common goal and answer the question: Are we on the right track to making a product that users will want to use?\n\nObvious benefits aside, why would a team want to spend the time going through this process? There are multiple selling points, but the main one is that a design sprint reduces the time and money spent during the product lifecycle. A design sprint is a time-boxed way to get clear answers before investing in any development resources. It also brings the team together and gets everyone on the same page from the very beginning. This helps move the project forward even after the sprint concludes. \n\n## How we run remote design sprints\n[Jake Knapp](https://jakeknapp.com/sprint) created the design sprint process at Google in 2010, and during his time there he refined the process to be what it is today. Design sprints were originally designed to take place in person over five days, but over the past few years they have gone through continuous adjustments and refinements to adapt to remote practices. A more recent example is the four-day sprint we ran with the team.\n\n![](https://about.gitlab.com/images/blogimages/designsprint-diagram.png)\nDesign sprint diagram showing the four-day breakdown\n\nThe big question here is: How do we develop a process for GitLab that works across time zones, runs partially asynchronously, and is fully remote? \n\nDesign sprints were originally run in a conference room, with everyone together. If you needed an answer, the facilitator was right there at the front, able to answer questions or help with activities. Things get significantly more complicated when everyone is located on different continents. Even so, we managed to figure out a successful process through a bit of trial and error, and the following tips will help anyone run a successful sprint in a remote setting. \n\n### 1. Thorough planning is the secret ingredient\nEven an in-person, fully synchronous design sprint requires preparation. In a well-planned design sprint, the process does most of the heavy lifting and gets you the right results in the end. So, when it comes to running a sprint that spans time zones, remotely and asynchronously, the importance of planning increases tenfold. \n\nThe first thing a team needs to do before starting a design sprint is to answer some important questions:\n- What is the problem for the customer/user?\n- Why is it important for the business/technology?\n- What evidence do we have that this is a problem worth solving?\n- What research insights do we already have about the design problem?\n\nWith answers to all these questions, the team now has established goals and objectives to sprint towards. The clarity around this ensures everyone starts on the same page and is working toward a common purpose.  \n\n### 2. Set the time expectations\nDesign sprints can be demanding in terms of the mental capacity and attention participants are required to dedicate to them. Advance capacity planning helps participants to be more present and engaged, and to bring their best ideas to the table. This is only possible if they set aside the time required for the sprint in advance. It also gives the facilitator a chance to answer any questions related to the sprint and set the expectations ahead of time.  \n\nPart of this includes understanding how the team's time zones can impact asynchronous activities. 
It is good to look into the following: \n- Review time zones and ensure sprint participants don't have to wake up too early or stay up too late. Sometimes this can be challenging, and that's when leaning on the asynchronous aspect of communication is important. Tools like this [time zone converter](https://www.timeanddate.com/worldclock/converter.html) can help make this process easier.\n- Depending on how far time zones are spread, some people may finish their day hours before others even start. Therefore, a one-day window likely isn't enough of a time box for a task/activity. A practical window can span 48 hours in some cases, meaning each day of the design sprint could potentially take two days.\n- Ensure activities or announcements are assigned and communicated at the start of the day in the earliest time zone. These are best shared both in Slack and in the issue for the respective day. \n- Account for unforeseen reasons for participants' unavailability, as there will always be aspects we cannot control. \n\n### 3. Partnership is key\nRunning a design sprint is not a one-person job. To ensure smooth operation and get the best results, the product designer and product manager need to team up. A strong partnership between the two can make the process of planning and running a sprint less overwhelming. The split in responsibilities can look something like this:\n- Product can help define business and product goals, and reach out to users and team members to participate. \n- Design can help facilitate and plan the sprint, and guide ideation and prototyping. Design can also diligently plan for testing the concepts that come out of the sprint. \n\n### 4. Tools and tips\nWith all the planning complete, the biggest task is to facilitate and guide the team through the sprint process. Running a sprint involves using various sets of tools for different activities to ensure everything runs smoothly. During the sprint with the Environments team, we took advantage of the following:\n- GitLab issues to outline the activities and expectations for each day and serve as a single source of truth \n- Mural boards to collaborate on activities such as 'How Might We's', ideation, and prototyping\n- Zoom to meet synchronously, along with a Slack channel for asynchronous updates\n- Google Drive to share files, such as the lightning talk recordings\n\nAs a facilitator, I also took advantage of GitLab's asynchronous culture to pre-record videos, such as our Sprint Kickoff and Activity Walkthroughs, so participants could go through these in their own time during each day.\n\n### 5. Celebrate the wins\nOnce the sprint week has concluded and the team has landed on an experience or feature they want to move forward with, it's time to celebrate the wins! \n\nDesign sprints can be a lot of work, and it's great to look back on all that has been accomplished. Find ways to share those wins through team channels such as Slack and weekly meetings, or go even broader with blogs or social media posts. Who knows, this might encourage other teams to test out the design sprint process as well!\n\n## Support at GitLab for design sprints\n[A remote design sprint](https://gitlab.com/groups/gitlab-org/ci-cd/deploy-stage/environments-group/-/epics/1) helped the Environments team come together and contribute to solving a large problem. We were able to come out of the sprint with a clear concept to move forward with and a shared understanding of what the future of environments at GitLab could be. 
I was motivated to further document the resources that came out of this activity and make them accessible to the team. We landed on [a design sprint process](https://about.gitlab.com/handbook/product/ux/design-sprint/) that can be shared, re-used, and built upon by other designers. Not only did we arrive at a solution that fit what we had been looking for all along, but the team also came together during the process and built it as one.",[1144,959],{"slug":1407,"featured":6,"template":678},"remote-design-sprints","content:en-us:blog:remote-design-sprints.yml","Remote Design Sprints","en-us/blog/remote-design-sprints.yml","en-us/blog/remote-design-sprints",{"_path":1413,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1414,"content":1420,"config":1425,"_id":1427,"_type":16,"title":1428,"_source":17,"_file":1429,"_stem":1430,"_extension":20},"/en-us/blog/how-to-secure-cloud-run-deployment-with-auto-devops",{"title":1415,"description":1416,"ogTitle":1415,"ogDescription":1416,"noIndex":6,"ogImage":1417,"ogUrl":1418,"ogSiteName":692,"ogType":693,"canonicalUrls":1418,"schema":1419},"How to secure Google Cloud Run deployment with GitLab Auto DevOps","This tutorial will help teams speed development, improve security, and harness the power of serverless technology.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682898/Blog/Hero%20Images/cloud-security.png","https://about.gitlab.com/blog/how-to-secure-cloud-run-deployment-with-auto-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to secure Google Cloud Run deployment with GitLab Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2023-08-21\",\n      }",{"title":1415,"description":1416,"authors":1421,"heroImage":1417,"date":1422,"body":1423,"category":14,"tags":1424},[721],"2023-08-21","\nTeams looking for efficiency often look to GitLab and serverless platforms to minimize management overhead and speed deployment times. GitLab's tight integration with [Google Cloud Run](https://cloud.google.com/run) means that teams can take advantage of the industry-leading DevSecOps platform to deliver container-based applications securely and efficiently.\n\nThis tutorial will show you how to deploy applications to Cloud Run using GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), a feature that lets developers quickly use CI/CD pipelines via pre-built templates. This approach can accelerate testing and deployment because stages and jobs are already pre-configured.\n\n## Prerequisites\nBefore you begin, make sure you have the following:\n- a Google Cloud project with the Cloud Run and Cloud Build APIs enabled\n- a Google Cloud service account with Cloud Run Admin, Cloud Build Service Agent, Service Account User, and Project Viewer permissions\n- a GitLab project containing your application code\n\n### Demo walkthrough\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/hIFagDyo3f8\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\n\n**Step 1:** Configure Google Cloud credentials\n\nTo start, use the Google Cloud service account with the necessary permissions. Once you have the service account, export its key to a JSON file and encode it using base64.
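If you use the `gcloud` CLI, this step might look like the following sketch (the service account email and file names are placeholders, not values from the tutorial):

```shell
# Create and download a JSON key for the deployment service account
gcloud iam service-accounts keys create service-account-key.json \
  --iam-account="deployer@<PROJECT_ID>.iam.gserviceaccount.com"

# Base64-encode the key so it can be stored as a masked CI/CD variable
# (-w 0 disables line wrapping on GNU coreutils; omit it on macOS)
base64 -w 0 service-account-key.json
```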
\n\n**Step 2:** Add Auto DevOps to your GitLab project\n\nNavigate to your GitLab project and create a new file at the root called \".gitlab-ci.yml\". Add the following lines of code to include the Auto DevOps template, which automatically configures your pipeline based on project settings and configuration:\n\n```yaml\ninclude:\n  - template: Auto-DevOps.gitlab-ci.yml\n```\n\nCommit the changes to your project.\n\n**Step 3:** Configure environment variables\n\nAdd the following environment variables to your GitLab project:\n\n* `BASE64_GOOGLE_CLOUD_CREDENTIALS`: The base64-encoded JSON file containing your service account key. Make sure to mask this variable.\n* `PROJECT_ID`: The Google Cloud project ID.\n* `SERVICE_ID`: The service ID that will be used for Cloud Run. For this tutorial, we'll use \"nodejs\" as our service ID.\n\n**Step 4:** Configure the CI/CD pipeline\n\nModify the \".gitlab-ci.yml\" file to add the Google Cloud SDK image, gcloud commands, Docker, and the necessary configuration for deploying your application to Cloud Run. \n\n```yaml\nimage: google/cloud-sdk:latest\n```\n\nAdditionally, use Google Cloud Build to generate the container image required for deployment. Commit the changes to your project.\n\n```yaml\ndeploy:\n  stage: deploy\n  script:\n    - export GOOGLE_CLOUD_CREDENTIALS=$(echo \"$BASE64_GOOGLE_CLOUD_CREDENTIALS\" | base64 -d)\n    - echo \"$GOOGLE_CLOUD_CREDENTIALS\" > service-account-key.json \n    - gcloud auth activate-service-account --key-file service-account-key.json \n    - gcloud config set project $PROJECT_ID \n    - gcloud auth configure-docker\n    - gcloud builds submit --tag gcr.io/$PROJECT_ID/$SERVICE_ID\n    - gcloud run deploy $SERVICE_ID --image gcr.io/$PROJECT_ID/$SERVICE_ID --region=us-central1 --platform managed --allow-unauthenticated \n```\n\n**Step 5:** Finalize the DAST stage\n\nOnce your application has been deployed to Cloud Run, complete the dynamic application security testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)) stage in the CI/CD pipeline to help keep your application secure. Add the Cloud Run URL to your \".gitlab-ci.yml\" file and enable the `DAST_FULL_SCAN_ENABLED` and `DAST_BROWSER_SCAN` options. Commit the changes to your project.\n\n```yaml\nvariables:\n  DAST_WEBSITE: \u003Cproject URL>\n  DAST_FULL_SCAN_ENABLED: \"true\"\n  DAST_BROWSER_SCAN: \"true\" \n```
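Putting the pieces from Steps 2-5 together, the full `.gitlab-ci.yml` for this tutorial looks roughly like this (the DAST website URL is a placeholder you fill in after the first deployment):

```yaml
include:
  - template: Auto-DevOps.gitlab-ci.yml

image: google/cloud-sdk:latest

variables:
  DAST_WEBSITE: <project URL>
  DAST_FULL_SCAN_ENABLED: "true"
  DAST_BROWSER_SCAN: "true"

deploy:
  stage: deploy
  script:
    - export GOOGLE_CLOUD_CREDENTIALS=$(echo "$BASE64_GOOGLE_CLOUD_CREDENTIALS" | base64 -d)
    - echo "$GOOGLE_CLOUD_CREDENTIALS" > service-account-key.json
    - gcloud auth activate-service-account --key-file service-account-key.json
    - gcloud config set project $PROJECT_ID
    - gcloud auth configure-docker
    - gcloud builds submit --tag gcr.io/$PROJECT_ID/$SERVICE_ID
    - gcloud run deploy $SERVICE_ID --image gcr.io/$PROJECT_ID/$SERVICE_ID --region=us-central1 --platform managed --allow-unauthenticated
```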
\n\nIn this tutorial, we successfully deployed a Cloud Run application using GitLab's Auto DevOps. By following these steps, you can enjoy faster development and improved security, and harness the power of serverless technology.\n",[726,873,1307],{"slug":1426,"featured":6,"template":678},"how-to-secure-cloud-run-deployment-with-auto-devops","content:en-us:blog:how-to-secure-cloud-run-deployment-with-auto-devops.yml","How To Secure Cloud Run Deployment With Auto Devops","en-us/blog/how-to-secure-cloud-run-deployment-with-auto-devops.yml","en-us/blog/how-to-secure-cloud-run-deployment-with-auto-devops",{"_path":1432,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1433,"content":1439,"config":1446,"_id":1448,"_type":16,"title":1449,"_source":17,"_file":1450,"_stem":1451,"_extension":20},"/en-us/blog/remote-development-beta",{"title":1434,"description":1435,"ogTitle":1434,"ogDescription":1435,"noIndex":6,"ogImage":1436,"ogUrl":1437,"ogSiteName":692,"ogType":693,"canonicalUrls":1437,"schema":1438},"Behind the scenes of the Remote Development Beta release","Discover the epic journey of GitLab's Remote Development team as they navigate last-minute pivots, adapt, and deliver new features for users worldwide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679888/Blog/Hero%20Images/remotedevelopment.jpg","https://about.gitlab.com/blog/remote-development-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Behind the scenes of the Remote Development Beta release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2023-08-16\",\n      }",{"title":1434,"description":1435,"authors":1440,"heroImage":1436,"date":1442,"body":1443,"category":14,"tags":1444},[1441],"David O'Regan","2023-08-16","\nIn May 2023, the Create:IDE team faced an epic challenge – to merge the [Remote Development Rails monolith integration branch](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/105783) into the `master` branch of the GitLab Project. This was no small ask, as the merge request was of considerable size and complexity. In this blog post, we'll delve into the background, justifications, and process behind this endeavor.\n\nThe merge request titled \"Remote Development feature behind a feature flag\" was initiated by the Create:IDE team, aiming to merge the branch \"remote_dev\" into the \"master\" branch of the Rails monolith GitLab project. The MR contained `4` commits, `258` pipelines, and `143` changed files, amounting to a total of `+7243` lines of code added to the codebase.\n\nInitially, the MR was created to reflect the work related to \"Remote Development\" under the \"Category: Remote Development.\" It was primarily intended to have CI pipeline coverage for the integration branch and was not meant for individual review or direct merging. The plan was to merge this code into the master branch via the [\"Remote Development Beta - Review and merge\" Epic](https://gitlab.com/groups/gitlab-org/-/epics/10258).\n\n![SUM](https://about.gitlab.com/images/blogimages/remote-development/SUM.png){: .shadow.medium}\n\n### How the Remote Development project started\nAs a team, we embarked on an ambitious journey to create a greenfield feature: the [Remote Development](https://docs.gitlab.com/ee/user/project/remote_development/) offering at GitLab. This feature had a vast scope, many unknowns, and required solving numerous new problems. 
To efficiently tackle this task, we decided to work on an integration branch using a [low-ceremony process](https://stackoverflow.com/questions/68092498/what-does-low-ceremony-mean). This decision enabled us to develop and release the feature in an impressively short time frame of less than four months.\n\nWorking on an integration branch gave us the flexibility to make significant progress, but the intention was always to eventually break the work down into smaller, iterative MRs that would follow the standard [GitLab review process](https://docs.gitlab.com/ee/development/code_review.html). We had a [detailed plan](https://gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs/-/blob/main/doc/integration-branch-process.md#master-mr-process-summary) for this process, but we realized that following the original plan would not allow us to meet our goal of releasing the feature in GitLab 16.0.\n\n### Merging the integration branch MR without breaking it up\nDuring the development of the Remote Development feature, our team faced several challenges that led us to adopt a new approach for merging the integration branch into master. First, as part of our [velocity-based XP/Scrum style process](https://about.gitlab.com/handbook/engineering/development/dev/create/ide/#-remote-development-iteration-planning), we realized that meeting the 16.0 release goal would require us to cut scope. A velocity report, \"[Velocity-based agile planning report](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/118436),\" highlighted that breaking down and reviewing individual MRs would take too long, considering the impending due date and the likelihood of last-minute scope additions.\n\nSecond, we [made the decision](https://gitlab.com/gitlab-org/gitlab/-/issues/398227#note_1361192858) to release workspaces as a **beta feature for public projects** for customers in [GitLab 16.0](/releases/2023/05/22/gitlab-16-0-released/#remote-development-workspaces-available-in-beta-for-public-projects). This approach reduced the complexity of the rollout plan and allowed us to get valuable feedback earlier, but required us to enable the feature by default earlier than planned. To align with this decision, we determined that merging the integration branch after review was the best course of action. An announcement was made to explain the change in plan, and we set specific timelines for the review process to ensure smooth coordination.\n\n> Hello Reviewers/Maintainers 👋 We have opened up a Zoom room through all of next week as an easy sync place for us all to collaborate and triage questions. As the MR is quite large, it might be overwhelming to determine where to begin. To help, we will aim to furnish a summary of what we have included, such as two new database tables and a couple of GraphQL/REST APIs. We will also be available through the week in the Zoom room and, without being too prescriptive of an approach, I would suggest we do a sync walkthrough of the MR first and then kick off the reviews.\n\nAddressing the concerns about risk, team members discussed the challenges and potential solutions. While there were apprehensions, we were confident in the overall quality of the feature. A disciplined plan for merging MRs was initially considered, but based on our velocity metrics, it was evident that meeting the public beta release goal required a new strategy.\n\nDespite the deviations from our usual practices, we acknowledged the urgency to deliver the initial release on time. 
The decision was not taken lightly, and we ensured that the merge had extensive [test coverage](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) and [feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html) in place to address any potential issues. We accepted that some aspects would be overlooked in the initial MR review cycle, but we committed to addressing them in subsequent iterations.\n\n### Keeping the pipeline green and stable for the merge\nTo ensure the successful merge of the integration branch containing the Remote Development feature, our team made significant efforts to keep the pipeline green and stable. As the MR was quite large and contained critical functionality, it was crucial to maintain a high level of quality and reduce the risk of introducing regressions.\n\nTo address these challenges, the team adopted a disciplined approach to [CI/CD](https://about.gitlab.com/topics/ci-cd/). Throughout the development process, CI pipelines were carefully monitored, and any failing tests or issues were promptly addressed. The team conducted rigorous testing and code reviews to identify and fix potential bugs and ensure that the changes did not negatively impact the existing functionality of the codebase.\n\nAdditionally, extensive test coverage was put in place to ensure that the new feature worked as expected and did not cause unintended side effects. The team utilized GitLab's [test coverage visualization](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) capabilities to track the extent of test coverage and identify areas that required additional testing.\n\n![PIPE](https://about.gitlab.com/images/blogimages/remote-development/PIPE.png){: .shadow.medium}\n\n## The merging process\nAs part of the Remote Development team, we took a strategic approach to the merging process. We identified three categories of follow-up tasks that needed to be addressed after the release:\n\n1. **To-dos:** This category encompassed follow-up issues that required further attention.\n2. **Disabled linting rules:** Any issues related to disabled linting rules were included in this category.\n3. **Follow-up from review:** Non-blocking concerns raised during the review process were categorized here.\n\nTo manage this process effectively, we organized these categories into [child epics](https://docs.gitlab.com/ee/user/group/epics/manage_epics.html#multi-level-child-epics) under the main epic representing the merging effort.\n\n1. Child epic for [to-do follow-up issues](https://gitlab.com/groups/gitlab-org/-/epics/10472)\n2. Child epic for [disabled linting rules follow-up issues](https://gitlab.com/groups/gitlab-org/-/epics/10473)\n3. Child epic for [follow-up issues from review](https://gitlab.com/groups/gitlab-org/-/epics/10474)\n\n\n## Reviewer resources\nDuring the integration branch merge process for the Remote Development feature, we ensured a smooth and collaborative review experience for all involved. To facilitate this, we set up the following resources and documented the information in GitLab's issue, epic, and MR reviews for better persistence and traceability:\n\n1. **Dedicated Slack channel:** We had a Slack channel that served as our primary hub for coordinating reviews and resolving any blockers that arose during the process. The discussions, decisions, and important points discussed in this channel were documented in the related GitLab issues and epics. 
This approach enabled us to maintain a historical record of the conversations to refer back to in the future.\n2. **General Slack channel:** For non-urgent or non-blocking questions and discussions, reviewers could use a general Slack channel. Similar to the dedicated channel, we documented the relevant information from this channel in the corresponding issues and MR reviews in GitLab.\n3. **Addressing urgent issues:** When urgent issues required immediate attention, reviewers could directly address our technical leads [Vishal Tak](https://gitlab.com/vtak) and/or [Chad Woolley](https://gitlab.com/cwoolley-gitlab) in their Slack messages. However, we kindly requested that [direct messages be avoided](https://about.gitlab.com/handbook/communication/#avoid-direct-messages) to promote open collaboration. The resolutions to these urgent issues were documented in the corresponding GitLab issues or MR discussions.\n4. **Zoom collaboration room:** The collaborative sessions held in the open Zoom room were not only beneficial for real-time discussions but also for fostering a collaborative environment. After each session, we summarized the key points and decisions made during the meeting in the associated GitLab issue or MR, making sure all important outcomes were captured and accessible to the team.\n\nThroughout the review process, we were committed to maintaining a seamless and well-documented workflow. By capturing all relevant information in GitLab issues, epics, and MR reviews, we ensured that the knowledge was persistently available, and future team members could easily understand the context and decisions made during the integration process.\n\n## Application security review\nDuring the application security review process, we focused on providing a secure and reliable Remote Development feature for our users. Here are the key resources and updates related to the application security review:\n\n1. **Main application security review issue:** The main application security review issue served as the central hub for tracking security-related considerations. You can find the defined process we followed [here](https://about.gitlab.com/handbook/security/security-engineering/application-security/appsec-reviews.html).\n2. **Application security review comment:** The application security review issue contained a comment indicating that the merge was not blocked unless there were severe issues that could impact production. \"In order to maintain a smooth merge process, we do not block MRs from being merged unless we identify severe issues that could prevent the feature from going into production, such as S1 or S2 level problems. If you are aware of any design flaws or concerns that might qualify as such issues, please bring them to our attention. We can review them together and address any questions or concerns that arise. Let's work collaboratively to find an approach that works for both parties. 👍\"\n3. **Engineering perspective:** For managing the application security review process from an engineering team perspective, we had a dedicated issue, which is kept confidential for security reasons. \n4. **Security and authentication matters:** All security and authentication concerns pertaining to the Beta release were documented within the [`Remote Development Beta -Auth` epic](https://gitlab.com/groups/gitlab-org/-/epics/10377). As of April 30, 2023, we are delighted to announce that **no known issues or obstacles were found that would impede the merge**. 
This represents a significant accomplishment, considering the intricate nature of this new feature.\n5. **Initial question raised:** During the application security review, one initial question was raised, and we promptly addressed it. You can track the issue and our response [here](https://gitlab.com/gitlab-org/gitlab/-/issues/409317).\n\n## Database review\nTo ensure the reliability and efficiency of the Remote Development feature, we sought guidance from the database reviewer. Although the team had not conducted a thorough self-review, we were fully prepared to address any blocking issues raised during the review process. Our references for the review were:\n\n- [Database review documentation](https://docs.gitlab.com/ee/development/database_review.html)\n- [Database reviewer guidelines](https://docs.gitlab.com/ee/development/database/database_reviewer_guidelines.html)\n\nAs an example, during the database migration review, a discussion arose between [Alper Akgun](https://gitlab.com/a_akgun) and Chad regarding the efficient ordering of columns in the workspaces table. Alper initially suggested placing integer values at the beginning of the table based on relevant documentation.\n\nChad questioned the benefit of this suggestion, pointing out that the specific integer field, `max_hours_before_termination`, would still be padded with empty bytes even if moved to the front, due to its current position between two text fields.\n\nAlper proposed an alternative approach, emphasizing that organizing variable-sized fields (such as `text`, `varchar`, `arrays`, `json`, `jsonb`) at the end of the table could be sufficient for the workspaces table.\n\nUltimately, Chad took the initiative to implement the changes, moving all variable-length fields to the end of the table, and documented the discussion as a comment to address review suggestions.\n\nWith this collaborative effort, the workspaces table was efficiently optimized, and the team gained valuable insights into database column ordering strategies.\n\n![DB](https://about.gitlab.com/images/blogimages/remote-development/DB.png){: .shadow.medium}\n\n## Ruby code review\nDuring the Ruby code review phase, we followed a meticulous approach by conducting a comprehensive self-review of every line of code. Our goal was to ensure the highest code quality and address any potential issues identified by the reviewers effectively.\n\nTo be clear, the Ruby code review primarily focused on backend changes and server-side improvements. This included optimizing performance, enhancing functionalities, and refining the overall codebase to deliver a seamless user experience.\n\nFor the code review process, we referred to the [Code review documentation](https://docs.gitlab.com/ee/development/code_review.html), a valuable resource that guided us in maintaining industry best practices and adhering to the GitLab community's coding standards.\n\n### Example: Enhance error messages for unavailable features\nAs an example during the code review, we addressed an essential aspect of the workspace method, focusing on how we handle scenarios related to the `remote_development_feature_flag` and the `remote_development` licensed feature. The primary objective was to enhance the error messages presented to users when these features are not available.\n\nInitially, the code employed identical error messages for both cases, making it less clear to users whether the issue was due to a missing license or a disabled feature flag. 
This ambiguity could lead to confusion and hinder the user experience.\n\n#### The suggested improvement\nDuring the review, one of our maintainers, [Peter Leitzen](https://gitlab.com/splattael), raised an important question: \"Are we OK with having only a single error message for both cases (missing license and missing feature flag)?\"\n\nRecognizing the importance of clear communication, Chad proposed enhancing the error messages to provide distinct descriptions for each case. This improvement aimed to empower users by precisely conveying the reason behind the unavailability of certain features.\n\n#### The revised implementation\nFollowing Chad's suggestion, the code underwent the following changes:\n\n```ruby\nunless ::Feature.enabled?(:remote_development_feature_flag)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development_feature_flag' feature flag is disabled\"\nend\n\nunless License.feature_available?(:remote_development)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development' licensed feature is not available\"\nend\n\nraise_resource_not_available_error!('Feature is not available') unless current_user&.can?(:read_workspace)\n```\n\n#### The value of distinct error messages\nBy implementing distinct and descriptive error messages, we reinforce our commitment to user-centric development. Users interacting with our system will receive accurate feedback, helping them navigate potential roadblocks effectively. This enhancement not only improves the user experience but also streamlines troubleshooting and support processes.\n\nThis code review example highlights the significance of concise and informative error messages in delivering a top-notch user experience within the GitLab ecosystem. Our team's collaborative efforts ensure that users can confidently interact with our platform, knowing they'll receive clear and helpful error messages when needed.\n\n![BE1](https://about.gitlab.com/images/blogimages/remote-development/BE1.png){: .shadow.medium}\n\n### Example: Improving performance and addressing N+1 issues in WorkspaceType\nIn a recent code review, our team focused on optimizing the WorkspaceType and addressing potential N+1 query problems. The discussion involved two key contributors, [Laura Montemayor](https://gitlab.com/lauraX) and Chad, who worked together to enhance the performance of the codebase.\n\n#### Identifying the performance concerns\nDuring the review, Laura raised a performance concern regarding the possibility of N+1 queries in the WorkspaceType resolver. She suggested that preloading certain associations could be beneficial to avoid this common performance issue.\n\n#### A separate issue for N+1 control\nChad took prompt action and created a separate issue specifically aimed at resolving the N+1 query problems. 
The new issue, titled \"Address review feedback: Resolve N+1 issues,\" would address the concerns raised by Laura and implement the necessary preloading.\n\n#### Evaluating the potential N+1 impact\nChad provided insightful information about the low risk of real N+1 impact from two particular fields in the current implementation. He elaborated on how the queries for user and agent associations would largely be cache hits due to scoping and usage patterns. Chad diligently examined the cache hits happening in development, confirming the potential optimization.\n\nHere's a code snippet from the initial implementation:\n\n```ruby\n# Initial Implementation\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association\n  def user\n    object.user\n  end\n\n  # Resolver for the agent association\n  def agent\n    object.agent\n  end\nend\n```\n\n#### Treating performance as a priority\nBoth contributors acknowledged the significance of addressing the performance concern, with Laura emphasizing its importance. They agreed to prioritize the separate issue dedicated to resolving the N+1 queries and ensuring proper test coverage.\n\nHere's a code snippet from the revised implementation:\n\n```ruby\n# Revised Implementation with Preloading\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association with preloading\n  def user\n    ::Dataloader.for(::User).load(object.user_id)\n  end\n\n  # Resolver for the agent association with preloading\n  def agent\n    ::Dataloader.for(::Agent).load(object.agent_id)\n  end\nend\n```\n\n#### Considering future usage\nChad expressed excitement about the possibility of the new feature gaining significant usage. He humorously stated that encountering enough legitimate traffic on workspaces to trigger any performance impact would be a delightful problem to have, as it would indicate a growing user base.\n\n#### Collaboration and performance improvement\nThe code review exemplifies the collaborative and proactive approach of our team in optimizing the WorkspaceType. The team's dedication to addressing performance concerns ensures that our codebase remains performant and efficient, even as our user base grows.\n\n![BE2](https://about.gitlab.com/images/blogimages/remote-development/BE2.png){: .shadow.medium}\n\n## Frontend code review\nThe frontend code review process was managed by our resident `Create: IDE` frontend maintainers, [Paul Slaughter](https://gitlab.com/pslaughter) and [Enrique Alcántara](https://gitlab.com/ealcantara). Additionally, a significant portion of the new frontend UI code had already undergone separate reviews and was merged to master, contributing to the overall quality of the Remote Development feature.\n\n### Example: Collaborative code improvement for ApolloCache Mutators\nPaul started a thread on an old version of the diff related to `ee/spec/frontend/remote_development/pages/create_spec.js`. 
The code snippet in question involved creating a mock Apollo instance and writing queries to the cache.\n\n#### The initial implementation\nInitially, the code involved writing to the cache twice, which raised concerns among the maintainers, Paul and Enrique. Paul pointed out that the duplicate write was unintentional and wondered if the writeQuery was even necessary, given the removal of @client directives. However, he also acknowledged the need to test that the created workspace was added to the ApolloCache.\n\n```javascript\n// Initial Implementation\nconst buildMockApollo = () => {\n  // ... Other mock setup ...\n  \n  // Initial writeQuery for userWorkspacesQuery\n  mockApollo.clients.defaultClient.cache.writeQuery({\n    query: userWorkspacesQuery,\n    data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n  });\n\n  // ... Other mock setup ...\n};\n```\n\n#### Identifying a potential issue\nEnrique agreed that the duplicate write was unintentional and probably introduced during a rebase. He explained that pre-populating the cache with a user workspaces query empty result was essential for the mutator to have a place to add the workspace. However, he encountered difficulties in making the workaround work effectively in unit tests.\n\n#### Resolving the issue\nPaul highlighted the significance of pre-populating the cache with the user workspaces query empty result. He suggested leaving a comment to explain the necessity of the initial writeQuery, as it would be implicitly coupled to future writeQuery operations.\n\n```javascript\n// Resolving the Issue - Leaving a Comment\n// Pre-populate the cache with user workspaces query empty result to provide a place\n// for the mutator to add the Workspace later. This is needed for both test and production environments.\nmockApollo.clients.defaultClient.cache.writeQuery({\n  query: userWorkspacesQuery,\n  data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n});\n```\n\nHowever, upon further investigation, Paul discovered that the writeQuery might not be needed, and the issue might be a symptom of an underlying problem. He decided to open a separate thread to address this concern and indicated that he would work on a separate MR to handle it.\n\n```javascript\n// Resolving the Issue - Opening a Separate Thread and MR\n// Open a separate thread to discuss potential underlying issues.\n// Plan to work on a separate MR to handle it.\n// Stay tuned for updates!\n```\n\n![FE](https://about.gitlab.com/images/blogimages/remote-development/FE.png){: .shadow.medium}\n\n## What we learned\nAs part of the Remote Development team, we faced the challenge of merging the Remote Development Rails monolith integration branch to meet our ambitious release goal. We adapted to last-minute pivots and focused on minimizing risks during the review process. The successful merge brought us one step closer to benefiting GitLab users worldwide. We acknowledged areas for improvement and remained committed to refining the feature's quality. Our journey reflects our dedication to delivering results, embracing change, and pushing boundaries in the DevOps community. The release of the Remote Development feature in GitLab 16.0 is a significant milestone for GitLab, and we continue to iterate and grow, providing innovative solutions for developers worldwide.\n\nAn outcome of this process was an ongoing conversation to propose a [simplified review process for greenfield features](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/125117). 
Through this proposal, we aim to distill the lessons we learned during this experience and provide guidance to future teams facing similar challenges.\n\n## What is next for Remote Development?\nAfter the merge of the MR, several changes were implemented:\n- The first production tests were conducted to ensure the stability and functionality of the merged code.\n- The Dev Evangelism and Technical Marketing teams collaborated on [creating content](https://gitlab.com/groups/gitlab-com/marketing/developer-relations/-/epics/190) and on troubleshooting any issues that arose during the merge.\n- Feedback from the community was taken into account, and changes were made to address the concerns raised. This feedback was incorporated into an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031) and influenced the overall roadmap and direction of the project.\n\nDo you want to [contribute to GitLab](/community/contribute/)? Come and join the conversation in the `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab), or just pop in and say \"Hi.\"\n\n",[726,725,727,873,1445,703],"contributors",{"slug":1447,"featured":6,"template":678},"remote-development-beta","content:en-us:blog:remote-development-beta.yml","Remote Development Beta","en-us/blog/remote-development-beta.yml","en-us/blog/remote-development-beta",{"_path":1453,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1454,"content":1460,"config":1468,"_id":1470,"_type":16,"title":1471,"_source":17,"_file":1472,"_stem":1473,"_extension":20},"/en-us/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated",{"title":1455,"description":1456,"ogTitle":1455,"ogDescription":1456,"noIndex":6,"ogImage":1457,"ogUrl":1458,"ogSiteName":692,"ogType":693,"canonicalUrls":1458,"schema":1459},"Building GitLab with GitLab: How GitLab.com inspired Dedicated","Learn how the multi-tenancy SaaS solution, GitLab.com, influenced the design of the single-tenancy SaaS, GitLab Dedicated.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659740/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type.png","https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: How GitLab.com inspired Dedicated\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"},{\"@type\":\"Person\",\"name\":\"Craig Miskell\"},{\"@type\":\"Person\",\"name\":\"John Coghlan\"}],\n        \"datePublished\": \"2023-08-03\",\n      }",{"title":1455,"description":1456,"authors":1461,"heroImage":1457,"date":1465,"body":1466,"category":14,"tags":1467},[1462,1463,1464],"Andrew Newdigate","Craig Miskell","John Coghlan","2023-08-03","\nEarlier this year, we announced [the general availability of GitLab Dedicated](https://about.gitlab.com/blog/gitlab-dedicated-available/), our single-tenancy software-as-a-service (SaaS) offering. Dedicated, which addresses the needs of customers with stringent compliance requirements while maintaining speed, efficiency, and security, was developed from the lessons we learned building and using GitLab.com, our multi-tenancy model. 
Although there is overlap in how we manage both platforms, such as the same service-level monitoring stack, there were significant considerations that sparked the need for new design decisions, including how we approach automation, databases, monitoring, and availability. In this blog, we share some of those decision points and their outcomes.\n\n## GitLab platform options\nBefore we dive into the evolution of GitLab Dedicated, let’s level-set on GitLab’s [portfolio of platform models](https://docs.gitlab.com/ee/subscriptions/choosing_subscription.html#choose-a-subscription):\n- GitLab.com, a.k.a. multi-tenant GitLab SaaS on our pricing page and in our documentation\n- GitLab Dedicated, single-tenant SaaS that satisfies compliance requirements such as data residency, isolation, and private networking\n- GitLab self-managed, in which customers install, administer, and maintain their own GitLab instance\n\nEach method meets the different needs of our wide range of customers and requires a unique approach for how we create, package, and deploy the application.\n\nWhile both GitLab.com and Dedicated are SaaS-based, there are key differences between the two. The multi-tenant GitLab.com is the largest hosted instance of GitLab and services thousands of customers and millions of users. Because the platform's reliability is critical to so many customers and because of the iterative nature of how GitLab.com was built, decisions have been made along the way that are unique to the scale of this specific instance.\n\nIn contrast, GitLab Dedicated is a single-tenant SaaS application that is hosted by GitLab in the customer's region of choice (GitLab.com is hosted in the U.S.). While still providing a GitLab-managed SaaS solution for our customers, Dedicated instances are fully isolated from one another, running on a platform that automates the configuration and provisioning of the instances, along with automating as many of the day-two operations as possible, such as maintenance, monitoring, and optimization.\n\nHere are some examples of how Dedicated has used the blueprint of GitLab.com.\n\n## Improved automated deployments\nGitLab.com is a permanent installation with a great deal of history, having evolved significantly since it was first developed. Originally, it was deployed on a single instance in Amazon AWS, before migrating to Microsoft Azure, where it continued to scale out. From Azure, it migrated to its current cloud, Google Cloud Platform. Since then, many customer workloads have [migrated into Kubernetes](https://about.gitlab.com/blog/year-of-kubernetes/) and are supported by the Google Kubernetes Engine ([GKE](https://cloud.google.com/kubernetes-engine)).\n\nWith GitLab Dedicated, we're building smaller instances that rely on automation, repeatability, and deterministic environments. All customer tenant GitLab instance operations must be 100% automated, including provisioning, upgrades, scaling, configuration changes, and any other routine operations. The stack relies heavily on the GitLab Environment Toolkit ([GET](https://gitlab.com/gitlab-org/gitlab-environment-toolkit/-/blob/main/docs/environment_advanced_hybrid.md)) Cloud Native Hybrid, which uses the GitLab Helm charts for stateless workloads (e.g., Rails) and Omnibus for deployments to VMs (e.g., Gitaly). 
GET helps with deployments by targeting [reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/) and coordinating the provisioning of cloud resources, including compute instances, Kubernetes clusters, managed Postgres databases, and more.\n\nAs much as GET automates, it has a certain amount of required setup, which is acceptable to perform manually for one-off or otherwise long-lived deployments. To scale Dedicated, however, we also had to automate that setup, which we did with Terraform. Because this was a greenfield approach, we were able to be particularly careful with privileges. Our current cloud deployment target is AWS, so we developed a detailed identity and access management ([IAM](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)) policy to grant each stage of deployment only the strictly necessary access. We also use IAM role assumption from trusted workloads in a central AWS account to eliminate the need for explicit credentials (see the sketch at the end of this section).\n\nDeployments follow this process in order:\n- An account creation job running from a trusted location creates a fresh AWS account in an [AWS Organization](https://docs.aws.amazon.com/organizations/index.html), placing it in the correct Organizational Unit to automatically have a [CloudFormation StackSet](https://docs.aws.amazon.com/organizations/latest/userguide/services-that-can-integrate-cloudformation.html) applied, with ongoing updates handled by AWS when needed. This allows us to operate the entire lifecycle of the tenant account using IAM Role Assumption rather than generating and storing static IAM credentials.\n- The Prepare stage sets up a fresh AWS account ready to receive a deployment; its privileges are quite high-powered, but still limited to the necessary areas, including creating the next role.\n- The Onboard stage creates some high-level resources and otherwise performs the setup that GET requires to run, including creating the roles for the next stages with their own limited privileges.\n- The Provision stage is mostly about running GET Terraform and creating the compute and storage resources onto which GitLab will be deployed, with a few additions for our specific needs.\n- The Configure stage deploys the GitLab application onto the resources created earlier. At its core, this is the GET Ansible stage, but it includes our own Terraform wrapper as well to handle our specific needs.\n\nOnce these stages complete, a fully deployed GitLab instance is ready to go.\n\nConfiguration changes and GitLab upgrades execute the same set of stages, ensuring everything is still configured correctly and applying any pending changes. In the early days of GitLab Dedicated, this was done in GitLab CI/CD pipelines operating on GitLab.com, with the tenant descriptions stored as JSON files in a repository, which was an effective and simple place to start.\n\nHowever, this multi-stage deployment is now managed by [Switchboard](https://about.gitlab.com/direction/saas-platforms/switchboard/), a portal we built specifically for GitLab Dedicated. Switchboard is a bespoke Rails application, which will be the single source of truth for configuration, accessible by customers to manage customer-facing settings, as well as by GitLab Dedicated staff for general management. Switchboard will be responsible for automating regular upgrades, including gradual rollouts across the fleet of Dedicated instances.
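To make the role-assumption pattern mentioned above concrete, here is a rough sketch of what a deployment stage does conceptually; the account ID placeholder, role name, and session name are illustrative, not GitLab's actual configuration:

```shell
# Exchange the trusted central-account identity for short-lived
# credentials in the tenant account; no static keys are stored
aws sts assume-role \
  --role-arn "arn:aws:iam::<TENANT_ACCOUNT_ID>:role/provision-stage" \
  --role-session-name "dedicated-provision"
```

The returned temporary credentials limit the stage to exactly the privileges its role grants, which keeps each stage's blast radius small.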
\n\n## Databases geared towards the needs of single tenancy\nGitLab.com uses self-managed Postgres and Redis. For GitLab Dedicated, we wanted to leverage AWS’s managed services as much as possible. Examples include RDS, ElastiCache, and OpenSearch, the AWS-managed Elasticsearch service. Some of these services may not always be able to support GitLab.com-scale platforms, but they handle the traffic of a single-tenant instance well and provide reliable failovers and ongoing maintenance with no effort on our part.\n\n## Monitoring aligned with strict compliance needs\nThe observability stack for GitLab Dedicated relies on the expertise we gained from building GitLab.com. The monitoring, logging, and availability infrastructure is all maintained within the customer's AWS account; nothing is shared. We receive low-context alerts from these private systems. They serve as a mechanism to direct us to the customer account so we can review what is going on and triage the underlying issues if needed. This helps with regulators and compliance, as nothing can leak: the data never leaves the customer's system.\n\nWhile Dedicated and GitLab.com share much of the same monitoring stack, Dedicated instances have tended to reveal different issues within our application. This is due to GitLab.com being a multi-tenant instance, while GitLab Dedicated instances are single-tenant. \n\nThink of the adage, \"[Your 9s are not my 9s](https://rachelbythebay.com/w/2019/07/15/giant/).\" In a platform at the scale of GitLab.com, a subset of users who encounter an issue in part of the application may be a very small percentage of the overall user base. The small impact relative to the scale of the platform may not create an alert. In a single-tenant instance, however, the same bugs or scaling issues can quickly impact a higher percentage of the overall users of the instance, escalating the issue's importance. Applying our service-level monitoring to single-tenant GitLab instances has benefited GitLab users who had encountered bugs that were overlooked in the volume of GitLab.com usage. When we identify issues in a Dedicated instance, we resolve them within the product.\n\n## High availability for all components\nConsidering the hybrid environment and the level of service that we want to offer to our customers, we have made some minor changes from the [standard reference architecture](https://docs.gitlab.com/ee/administration/reference_architectures/).\n\nOne such change is introducing high availability for all components. For lower sizes (i.e., up to 2,000 users), our architecture ships by default with all components in fully redundant mode. Components like RDS and ElastiCache will have a replica in a different Availability Zone. This is referred to as the primary region, and we have to define how it will look in the [Geo replicas](https://docs.gitlab.com/ee/administration/geo/setup/database.html).\n\n## Only on Dedicated\nIn addition to the other changes we made, we also built some features that are only used for GitLab Dedicated:\n- Bring your own key - customers can provide and manage the encryption keys used to encrypt AWS resources such as storage, allowing a customer to revoke access should that ever become necessary. This is not something that can be offered in a multi-tenant system like GitLab.com.\n- Switchboard - as mentioned above, Switchboard was purpose-built for Dedicated. 
It is a multi-tenant Ruby on Rails application, accessible by GitLab Dedicated customer administrators and GitLab Dedicated team members. Using this interface, customers can change the available application runtime settings, access provided graphs, add additional products, and more. The main Switchboard instance serves as a single source of truth for global configuration and status across multiple cloud providers and regions.\n- PrivateLink networking - allows traffic between tenant AWS accounts and customer accounts without exposing data to the internet. \n- Other network features - including traffic filtering and private hosted zones.\n\nDedicated has been an exciting project and a great learning experience for our team. We were able to apply the knowledge accumulated in building GitLab.com to deliver an important new product for our customers in a very efficient way. You can learn more about GitLab Dedicated by visiting our [Dedicated page](https://about.gitlab.com/dedicated/) or contacting a GitLab sales representative.\n\n_Check out the [first installment in our \"Building GitLab with GitLab\" series](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/), which takes you behind the scenes of the development of our web API fuzz testing._\n",[771,480,725,749],{"slug":1469,"featured":6,"template":678},"building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated","content:en-us:blog:building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated.yml","Building Gitlab With Gitlabcom How Gitlab Inspired Dedicated","en-us/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated.yml","en-us/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated",{"_path":1475,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1476,"content":1481,"config":1487,"_id":1489,"_type":16,"title":1490,"_source":17,"_file":1491,"_stem":1492,"_extension":20},"/en-us/blog/gitlab-gdk-remote-development",{"title":1477,"description":1478,"ogTitle":1477,"ogDescription":1478,"noIndex":6,"ogImage":1096,"ogUrl":1479,"ogSiteName":692,"ogType":693,"canonicalUrls":1479,"schema":1480},"Contributor how-to: Remote Development workspaces and GitLab Developer Kit","This tutorial helps you get GDK working inside Remote Development workspaces to begin contributing to GitLab.","https://about.gitlab.com/blog/gitlab-gdk-remote-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributor how-to: Remote Development workspaces and GitLab Developer Kit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Raimund Hook\"}],\n        \"datePublished\": \"2023-07-31\",\n      }",{"title":1477,"description":1478,"authors":1482,"heroImage":1096,"date":1484,"body":1485,"category":14,"tags":1486},[1483],"Raimund Hook","2023-07-31","\nOpen source is fundamental to GitLab. We believe that [everyone can contribute](https://about.gitlab.com/company/mission/#mission).\nTypically, we recommend that anyone contributing anything more than basic changes to GitLab run the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit) (GDK). 
Because contributors can't always meet the GDK's resource demands, we're working to enable GDK inside the cloud-based GitLab Remote Development workspaces.\n\nIn this article, I'll explain how I used a Remote Development workspace running in my Kubernetes cluster to make working with the GDK faster and easier.\n\n## A preliminary note\nFirst, keep in mind that as of this writing the [Remote Development workspaces](https://about.gitlab.com/direction/create/ide/remote_development/) feature is still in Beta. My example here is therefore very much a proof of concept — and as such, it has some rough edges.\n\nBefore getting started, I followed the \"[Set up a workspace](https://docs.gitlab.com/ee/user/workspace/#set-up-a-workspace)\" prerequisites guide in the GitLab docs. For a more detailed set of instructions, see Senior Developer Evangelist Michael Friedrich's tutorial on [how to set up infrastructure for cloud development environments](https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments/).\n\n## Getting started with workspaces\nTo start using workspaces, you will need a project configured with a `.devfile.yaml`. GitLab team members have curated [a number of example projects](https://gitlab.com/gitlab-org/remote-development/examples) you can review.\n\nInitially, I tried to do this with a fork of the GitLab project itself, but I ran into [some issues](https://gitlab.com/gitlab-org/gitlab/-/issues/414011) when the workspace began cloning the repository.\n\nTo figure out what was causing my problems, I looked more closely at what happens behind the scenes when a workspace is created.\n\n## Behind the scenes with Remote Development workspaces\nWhen you create a new workspace, the following happens:\n1. The GitLab agent for Kubernetes creates a new namespace in your cluster. The agent dynamically generates a name for and assumes management of the namespace.\n1. Inside the namespace, a new deployment is created, specifying the container you chose in your `.devfile.yaml` as the image to use.\n1. This deployment is configured with [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) that perform two actions:\n    1. Cloning the repository into `/project/${project_path}`.\n    1. Injecting the VS Code server binary into your container.\n1. Once those init containers are complete, your container starts and the workspace becomes available.\n\n## The clone problem\nWhen cloning a repository, `git` tends to do much of the work in memory. This can be a challenge on larger projects/repositories, as it can require significant amounts of RAM. When cloning the GitLab project, for instance, git consumes approximately 1.6GB of RAM. This number is only going to increase with time. Sure, strategies like [shallow clones](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---depthltdepthgt) can help reduce this, but these are perhaps less suited to active use by a developer, as they can increase the time required for ongoing git operations.\n\nIn fact, creating a workspace using our `.devfile.yaml` in a fork of the GitLab project failed for this reason. The init container performing the clone is currently hard-limited to 128MiB of RAM, after which the memory management processes on the node kill the container.\n\nTo overcome this limitation, move the `.devfile.yaml` into the root of a fork of the GDK repository. 
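\n\nAs a rough illustration of the difference (a sketch only; sizes and timings vary by machine and network), you can compare the cost of the two clones locally:\n\n```shell\n# Full clone of the GitLab repository: this is the clone that exceeds\n# the init container's 128MiB memory limit\ngit clone https://gitlab.com/gitlab-org/gitlab.git\n\n# Clone of the GDK repository: much smaller and faster\ngit clone https://gitlab.com/gitlab-org/gitlab-development-kit.git\n\n# Compare the on-disk sizes of the two repositories\ndu -sh gitlab gitlab-development-kit\n```\n\n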
The GDK project clones more quickly (and uses fewer resources), so it's a perfect starting point for running GDK itself. Another (bonus) advantage: You're then primed to contribute to the GDK itself, in addition to any of the other GitLab projects that the GDK clones.\n\n## Components of a GDK installation\nGDK clones the following projects from the GitLab 'family':\n* [GitLab](https://gitlab.com/gitlab-org/gitlab)\n* [Gitaly](https://gitlab.com/gitlab-org/gitaly)\n* [GitLab shell](https://gitlab.com/gitlab-org/gitlab-shell)\n\nThis allows you to work on any items in those directories as a part of your \"live\" installation.\n\n## Getting GDK installed and running in a workspace\nOnce I had a workspace up and running, my next step was to get GDK installed and running *in* that workspace. The GDK's documentation presents [several routes for doing this](https://gitlab.com/gitlab-org/gitlab-development-kit/#installation).\n\nA complete installation can take some time, as GDK needs to bootstrap itself and install a number of prerequisites. This is less than ideal in the context of a Remote Development workspace, as one of remote development's primary benefits is rapid access to a development environment. Requiring a user to bootstrap an environment that takes 50 minutes (or longer) doesn't help achieve this goal.\n\nTo combat this, I built a container image that effectively bootstraps and installs GDK, pre-building the GDK prerequisites and pre-seeding the database. This image and its associated tooling are currently [in review](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231).\n\n## Pre-building\nPre-building the container and running the bootstrap process on a scheduled basis allows us to perform that process once, without requiring the user to wait for something that can essentially be \"pre-canned\" for their use.\n\nOnce the workspace is running, we still need to \"reinstall\" the GDK environment with the latest version of our GitLab repository, but this step doesn't take quite as long as a complete bootstrap.\n\n## Generating a gdk.yml file\nTo work properly, GDK also requires a [`gdk.yml` file](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/configuration.md#gdkyml). This file tells GDK how to configure GitLab to return the correct URLs and other items. To get GDK running in Remote Development, Rails needs to return URLs in a certain scheme (otherwise your browser won't know where to connect). To help this along, we [inject an environment variable](https://gitlab.com/gitlab-org/gitlab/-/issues/415328) into the workspace container. This variable helps us determine the URL in use (which is dynamically generated for each workspace).\n\nWe [now have a script](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/support/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh?ref_type=heads) in GDK that will generate your `gdk.yml` file based on your workspace.\n\n## Creating our devfile\nThe contents of my `.devfile.yaml` look like this:\n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: tooling-container\n    attributes:\n      gl/inject-editor: true\n    container:\n      # NB! 
This image is only in use until https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231 is merged!\n      image: registry.gitlab.com/gitlab-org/gitlab-development-kit/gitlab-remote-workspace:stingrayza-gdk-remote-dev-add-container\n      memoryRequest: 10240M\n      memoryLimit: 16384M\n      cpuRequest: 2000m\n      cpuLimit: 6000m\n      endpoints:\n        - name: ssh-2222\n          targetPort: 2222\n        - name: gdk-3000\n          targetPort: 3000\n        - name: docs-3005\n          targetPort: 3005\n        - name: pages-3010\n          targetPort: 3010\n        - name: webpack-3808\n          targetPort: 3808\n        - name: devops-5000\n          targetPort: 5000\n        - name: jaeger-5778\n          targetPort: 5778\n        - name: objects-9000\n          targetPort: 9000\n        - name: shell-9122\n          targetPort: 9122\n```\n\nThis definition comes straight out of the [Workspace docs](https://docs.gitlab.com/ee/user/workspace/#devfile), and opens a number of ports that GDK uses. (For now, I've only tested the port `gdk-3000`, which is the link to our instance of GDK.)\n\n## From Workspace to GDK\nOnce we have a project with a `.devfile.yaml`, our final step is to [create a new workspace](https://docs.gitlab.com/ee/user/workspace/#create-a-workspace).\n\nAs a part of this step, your cluster will pull the image as defined in the `.devfile.yaml` and start it up. For the GDK image we pre-built, this can take a few minutes.\n\nOnce the workspace is ready, the last step is to follow the link from the UI to connect to the workspace. This will open up a familiar VS Code IDE, with our GDK fork checked out.\n\nBut wait, where's GDK?\n\nWell, the pre-build did most of the work for us, but we still need to take a few final steps before we can claim that GDK is up and running. These have been built into a script we can run from the integrated terminal within the workspace.\n\nTo open a terminal, we can click on the VS Code hamburger menu (top left), navigate to `Terminal`, and select `New Terminal`.\n\nNow we execute the following script, which completes the setup and copies a couple of files over from the pre-built folders:\n\n```shell\nsupport/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh\n```\n\nThis can take up to 15 minutes, but when it's done it should output the magic words — something like the following (note the 3000 in the URL; we specified that in the `.devfile.yaml` earlier):\n\n```shell\nSuccess! You can access your GDK here: https://3000-workspace-62637-2083197-apglwp.workspace.my-workspace.example.net/\n```\n\n## Connect to your GDK\nFollow the link as displayed using Cmd-click or Ctrl-click. After a couple of moments (GDK boot time), you should reach a familiar GitLab login screen.\n\nCongratulations! 
GDK is now running inside your Remote Development workspace.\n\nTo log in, type `gdk` in your terminal and you'll see the default admin credentials displayed near the bottom:\n\n```shell\n# Development admin account: xxxx / xxxx\n\nFor more information about GitLab development see\nhttps://docs.gitlab.com/ee/development/index.html.\n```\n\nLog into your GDK with the default credentials, change the admin user password, and you're all set!\n\n## Demo of workspace launch\nHere's a demo of launching a workspace in my personal cluster:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/iXq1NnTjnX0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to contribute to GitLab\nIn this article I explained how to get GDK up and running in Remote Development workspaces. This is not without its challenges, but the end result should mean that contributing to GitLab (especially in resource-constrained environments) is quicker and easier.\n\nDo you want to contribute to GitLab? Come and join in the conversation in the `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab), or just pop in and say \"hello.\"\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._",[726,894,727,873,1445,703],{"slug":1488,"featured":6,"template":678},"gitlab-gdk-remote-development","content:en-us:blog:gitlab-gdk-remote-development.yml","Gitlab Gdk Remote Development","en-us/blog/gitlab-gdk-remote-development.yml","en-us/blog/gitlab-gdk-remote-development",{"_path":1494,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1495,"content":1501,"config":1509,"_id":1511,"_type":16,"title":1512,"_source":17,"_file":1513,"_stem":1514,"_extension":20},"/en-us/blog/set-up-infrastructure-for-cloud-development-environments",{"title":1496,"description":1497,"ogTitle":1496,"ogDescription":1497,"noIndex":6,"ogImage":1498,"ogUrl":1499,"ogSiteName":692,"ogType":693,"canonicalUrls":1499,"schema":1500},"Cloud infrastructure for on-demand development in GitLab","Learn how to set up the requirements, manage Kubernetes clusters in different clouds, create the first workspaces and custom images, and get tips and troubleshooting.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659883/Blog/Hero%20Images/post-cover-image.jpg","https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Set up your infrastructure for on-demand, cloud-based development environments in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-07-13\",\n      }",{"title":1502,"description":1497,"authors":1503,"heroImage":1498,"date":1505,"body":1506,"category":14,"tags":1507},"Set up your infrastructure for on-demand, cloud-based development environments in GitLab",[1504],"Michael Friedrich","2023-07-13","Cloud-based development environments 
enable a better developer onboarding experience and help make teams more efficient. In this tutorial, you'll learn how to ready your infrastructure for on-demand, cloud-based development environments. You'll also learn how to set up the requirements, manage Kubernetes clusters in different clouds, create your first workspaces and custom images, and get tips for troubleshooting.\n\nThe GitLab agent for Kubernetes, an OAuth GitLab app, and a proxy pod deployment make the setup reproducible in different Kubernetes cluster environments and follow cloud-native best practices. Bringing your own infrastructure allows platform teams to store the workspace data securely, control resource usage, harden security, and troubleshoot the deployments in known ways.\n\nThis blog post is a long read, so feel free to navigate to the sections of interest. However, if you want to follow the tutorial step by step, note that the infrastructure setup sections depend on one another.\n\n- [Development environments on your infrastructure](#development-environments-on-your-infrastructure)\n- [Requirements](#requirements)\n    - [Workspaces domain](#workspaces-domain)\n    - [TLS certificates](#tls-certificates)\n- [GitLab OAuth application](#gitlab-oauth-application)\n- [Kubernetes cluster setup](#kubernetes-cluster-setup)\n    - [Set up infrastructure with Google Kubernetes Engine (GKE)](#set-up-infrastructure-with-google-kubernetes-engine-gke)\n    - [Set up infrastructure with Amazon Elastic Kubernetes Service (EKS)](#set-up-infrastructure-with-amazon-elastic-kubernetes-service-eks)\n    - [Set up infrastructure with Azure Managed Kubernetes Service (AKS)](#set-up-infrastructure-with-azure-managed-kubernetes-service-aks)\n    - [Set up infrastructure with Civo Cloud Kubernetes](#set-up-infrastructure-with-civo-cloud-kubernetes)\n    - [Set up infrastructure with self-managed Kubernetes](#set-up-infrastructure-with-self-managed-kubernetes)\n- [Workspaces proxy installation into Kubernetes](#workspaces-proxy-installation-into-kubernetes)\n- [Agent for Kubernetes installation](#agent-for-kubernetes-installation)\n- [Workspaces creation](#workspaces-creation)\n    - [Create the first workspaces](#create-the-first-workspaces)\n    - [Custom workspace container images](#custom-workspace-container-images)\n- [Tips](#tips)\n    - [Certificate management](#certificate-management)\n    - [Troubleshooting](#troubleshooting)\n    - [Contribute](#contribute)\n- [Share your feedback](#share-your-feedback)\n\n## Development environments on your infrastructure\nSecure, on-demand, cloud-based development workspaces are [available in beta for public projects](/blog/introducing-workspaces-beta/) for Premium and Ultimate customers. The first iteration allows you to bring your own infrastructure as a Kubernetes cluster. GitLab already deeply integrates with Kubernetes through the GitLab agent for Kubernetes, setting the foundation for configuration and cluster management.\n\nUsers can define and use a development environment template in a project. Workspaces in GitLab support the [devfile specification](https://docs.gitlab.com/ee/user/workspace/#devfile) as `.devfile.yaml` in the project repository root. The devfile attributes allow you to configure the workspace. For example, the `image` attribute specifies the container image that runs the workspace in an isolated container environment. 
The containers require a cluster orchestrator, such as Kubernetes, that manages resource usage and ensures data security and safety. Workspaces also need authorization: Project source code may contain sensitive intellectual property or otherwise confidential data in specific environments. The setup requires a GitLab OAuth application as its foundation.\n\nThe following steps provide an in-depth setup guide for different cloud providers. If you prefer to set up your own environment, please follow the [documentation for workspace prerequisites](https://docs.gitlab.com/ee/user/workspace/#prerequisites). In general, we will work through the following steps:\n0. (Optional) Register a workspaces domain, and create TLS certificates.\n1. Create a Kubernetes cluster and configure access and requirements.\n2. Install an Ingress controller.\n3. Set up the workspaces proxy with the domain, TLS certificates, and OAuth app.\n4. Create a new GitLab group with a GitLab agent project. The agent can be used for all projects in that group.\n5. Install the GitLab agent for Kubernetes using the UI-provided Helm chart command.\n6. Create an example project with a devfile configuration for workspaces.\n\nSome commands do not use the terminal indicator (`$` or `#`) to support easier copy-paste of command blocks into terminals.\n\n## Requirements\nThe steps in this blog post require the following CLI tools:\n1. `kubectl` and `helm` for Kubernetes\n2. `certbot` for Let's Encrypt\n3. `git`, `curl`, `dig`, `openssl`, and `sslscan` for troubleshooting\n\n### Workspaces domain\nWorkspaces require a domain with DNS entries. Cloud providers such as Google Cloud also offer domain services, which integrate more easily. You can also register and manage domains with your preferred provider.\n\nThe required DNS entries will be:\n- Wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`) A/AAAA records pointing to the Kubernetes cluster's external IP: `kubectl get services -A`\n- (Optional, with Let's Encrypt) ACME DNS challenge entries as TXT records\n\nAfter acquiring a domain, wait until the Kubernetes setup is ready and extract the A/AAAA records for the DNS settings. The following example shows how `remote-dev.dev` is configured in the Google Cloud DNS service.\n\n![GitLab remote development workspaces, example DNS configuration for remote-dev.dev](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_google_cloud_dns_remote-dev.dev-entries.png){: .shadow}\n\nExport shell variables that define the workspaces domains and the email contact. These variables will be used in all setup steps below.\n\n```\nexport EMAIL=\"user@company.com\"\nexport GITLAB_WORKSPACES_PROXY_DOMAIN=\"remote-dev.dev\"\nexport GITLAB_WORKSPACES_WILDCARD_DOMAIN=\"*.remote-dev.dev\"\n```\n\n**Note:** This blog post uses the example domain `remote-dev.dev` to illustrate a working setup. The domain `remote-dev.dev` is maintained by the [Developer Evangelism team at GitLab](/handbook/marketing/developer-relations/developer-evangelism/projects/). There are no public demo environments available at the time of writing this blog post.\n\n### TLS certificates\nTLS certificates can be managed with different methods. 
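\n\nBefore running the Let's Encrypt flow below, it can help to confirm that the `certbot` CLI from the requirements is installed and that your DNS records resolve. A quick sanity check (assuming Homebrew on macOS, in line with the other CLI installs in this post, and the example domain from above):\n\n```shell\n# Install the certbot CLI if it is not present yet\nbrew install certbot\n\n# The apex domain and an arbitrary subdomain (to exercise the wildcard\n# record) should both resolve to the cluster's external IP\ndig +short remote-dev.dev A\ndig +short test.remote-dev.dev A\n```\n\n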
To get started quickly, it is recommended to follow the [documentation steps](https://docs.gitlab.com/ee/user/workspace/#prerequisites) with Let's Encrypt, and consider production requirements for TLS certificates later.\n\n```shell\ncertbot -d \"${GITLAB_WORKSPACES_PROXY_DOMAIN}\" \\\n  -m \"${EMAIL}\" \\\n  --config-dir ~/.certbot/config \\\n  --logs-dir ~/.certbot/logs \\\n  --work-dir ~/.certbot/work \\\n  --manual \\\n  --preferred-challenges dns certonly\n\ncertbot -d \"${GITLAB_WORKSPACES_WILDCARD_DOMAIN}\" \\\n  -m \"${EMAIL}\" \\\n  --config-dir ~/.certbot/config \\\n  --logs-dir ~/.certbot/logs \\\n  --work-dir ~/.certbot/work \\\n  --manual \\\n  --preferred-challenges dns certonly\n```\n\nThe Let's Encrypt CLI prompts you for the ACME DNS challenge. This requires setting TXT records for the challenge session immediately. Add the DNS records and specify a low TTL (time-to-live) of 300 seconds so you can update the records quickly during the first steps.\n\n```\n_acme-challenge TXT \u003Cstringfromletsencryptacmechallenge>\n```\n\nYou can verify the DNS records using the `dig` CLI command.\n\n```shell\n$ dig _acme-challenge.remote-dev.dev txt\n...\n;; ANSWER SECTION:\n_acme-challenge.remote-dev.dev.\t246 IN\tTXT\t\"TlGRM9JGdXHGVklPWgytflxWDF82Sv04nF--Wl9JFvg\"\n_acme-challenge.remote-dev.dev.\t246 IN\tTXT\t\"CqG_54w6I0heWF3wLMAmUAitPcUMs9qAU9b8QhBWFj8\"\n```\n\nOnce the Let's Encrypt routine is complete, note the TLS certificate location.\n\n```\nSuccessfully received certificate.\nCertificate is saved at: /Users/mfriedrich/.certbot/config/live/remote-dev.dev/fullchain.pem\nKey is saved at:         /Users/mfriedrich/.certbot/config/live/remote-dev.dev/privkey.pem\nThis certificate expires on 2023-08-15.\nThese files will be updated when the certificate renews.\n\nSuccessfully received certificate.\nCertificate is saved at: /Users/mfriedrich/.certbot/config/live/remote-dev.dev-0001/fullchain.pem\nKey is saved at:         /Users/mfriedrich/.certbot/config/live/remote-dev.dev-0001/privkey.pem\nThis certificate expires on 2023-08-15.\nThese files will be updated when the certificate renews.\n```\n\nExport the TLS certificate paths into environment variables for the following setup steps.\n\n```shell\nexport WORKSPACES_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/fullchain.pem\"\nexport WORKSPACES_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/privkey.pem\"\n\nexport WILDCARD_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/fullchain.pem\"\nexport WILDCARD_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/privkey.pem\"\n```\n\n**Note**: If you prefer to use your own certificates, please copy the files into a safe location, and export the environment variables with the path details.\n\n## GitLab OAuth application\n_After preparing the requirements, continue with the components setup._\n\nCreate a [group-owned OAuth application](https://docs.gitlab.com/ee/integration/oauth_provider.html) for the remote development workspaces group. Creating a centrally managed app with a service account or group with limited access is recommended for production use.\n\nNavigate into the group `Settings > Applications` and specify the following values:\n\n1. Name: `Remote Development workspaces by \u003Cresponsible team> - \u003Cdomain>`. Add the responsible team that is trusted in your organization. For debugging, add the domain. 
There might be multiple authorization groups; this helps identify which workspace domain is used.\n2. Redirect URI: `https://\u003CGITLAB_WORKSPACES_PROXY_DOMAIN>/auth/callback`. Replace `GITLAB_WORKSPACES_PROXY_DOMAIN` with the domain string value.\n3. Set the scopes to `api, read_user, openid, profile`.\n\n![GitLab remote development workspaces, OAuth application in the group settings](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_oauth_app_create.png){: .shadow}\n\nStore the OAuth application details in your password vault, and export them as shell environment variables for the next setup steps.\n\nCreate a configuration secret for the proxy as a signing key (`SIGNING_KEY`), and store it in a safe place (for example, use a secrets vault like 1Password to create and store the key).\n\n```\nexport CLIENT_ID=\"XXXXXXXXX\" # Look into password vault and set\nexport CLIENT_SECRET=\"XXXXXXXXXX\" # Look into password vault and set\nexport REDIRECT_URI=\"https://${GITLAB_WORKSPACES_PROXY_DOMAIN}/auth/callback\"\n\nexport GITLAB_URL=\"https://gitlab.com\" # Replace with your self-managed GitLab instance URL if not using GitLab.com SaaS\nexport SIGNING_KEY=\"a_random_key_consisting_of_letters_numbers_and_special_chars\" # Look into password vault and set\n```\n\n## Kubernetes cluster setup\nThe following sections describe how to set up a Kubernetes cluster in different cloud and on-premises environments and install an [ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) for HTTP access. After completing the Kubernetes setup, you can continue with the workspaces proxy and agent setup steps.\n\n**Choose one method to create a Kubernetes cluster. 
Note: Use `amd64` as platform architecture [until multi-architecture support is available for running workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10594).** Arm-based cloud environments, for example AWS EKS on Graviton EC2 instances, will not work yet.\n\nYou should have defined the following variables from the previous setup steps:\n\n```sh\nexport EMAIL=\"user@company.com\"\nexport GITLAB_WORKSPACES_PROXY_DOMAIN=\"remote-dev.dev\"\nexport GITLAB_WORKSPACES_WILDCARD_DOMAIN=\"*.remote-dev.dev\"\n\nexport WORKSPACES_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/fullchain.pem\"\nexport WORKSPACES_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/privkey.pem\"\n\nexport WILDCARD_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/fullchain.pem\"\nexport WILDCARD_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/privkey.pem\"\n\nexport CLIENT_ID=\"XXXXXXXXX\" # Look into password vault and set\nexport CLIENT_SECRET=\"XXXXXXXXXX\" # Look into password vault and set\nexport REDIRECT_URI=\"https://${GITLAB_WORKSPACES_PROXY_DOMAIN}/auth/callback\"\n\nexport GITLAB_URL=\"https://gitlab.com\" # Replace with your self-managed GitLab instance URL if not using GitLab.com SaaS\nexport SIGNING_KEY=\"XXXXXXXX\" # Look into password vault and set\n```\n\n### Set up infrastructure with Google Kubernetes Engine (GKE)\n\n[Install and configure the Google Cloud SDK and `gcloud` CLI](https://cloud.google.com/sdk/docs/install?hl=en), and install the `gke-gcloud-auth-plugin` plugin to authenticate against Google Cloud.\n\n```shell\nbrew install --cask google-cloud-sdk\n\ngcloud components install gke-gcloud-auth-plugin\n\ngcloud auth login\n```\n\nCreate a new GKE cluster using the `gcloud` command, or follow the steps in the Google Cloud Console.\n\n```shell\nexport GCLOUD_PROJECT=group-community\nexport GCLOUD_CLUSTER=de-remote-development-1\n\ngcloud config set project $GCLOUD_PROJECT\n\n# Create cluster (modify for your needs)\ngcloud container clusters create $GCLOUD_CLUSTER \\\n    --release-channel stable \\\n    --zone us-central1-c \\\n    --project $GCLOUD_PROJECT\n\n# Verify cluster\ngcloud container clusters list\n\nNAME                     LOCATION         MASTER_VERSION   MASTER_IP       MACHINE_TYPE  NODE_VERSION       NUM_NODES  STATUS\nde-remote-development-1  us-central1-c    1.26.3-gke.1000  34.136.33.199   e2-medium     1.26.3-gke.1000    3          RUNNING\n\ngcloud container clusters get-credentials $GCLOUD_CLUSTER --zone us-central1-c --project $GCLOUD_PROJECT\nFetching cluster endpoint and auth data.\nkubeconfig entry generated for de-remote-development-1.\n```\n\n1. The setup requires the [`Kubernetes Engine Admin` role in Google IAM](https://cloud.google.com/kubernetes-engine/docs/concepts/access-control?hl=en#recommendations) to create ClusterRoleBindings.\n2. Create a new Kubernetes cluster (do not use Autopilot).\n3. Ensure that [cluster autoscaling](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler?hl=en) is enabled in the GKE cluster.\n4. Verify that a [default Storage Class](https://cloud.google.com/kubernetes-engine/docs/concepts/persistent-volumes?hl=en#storageclasses) has been defined.\n5. Install an Ingress controller, for example [ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/#gce-gke). 
Follow the documentation and run the following commands to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nkubectl create clusterrolebinding cluster-admin-binding \\\n  --clusterrole cluster-admin \\\n  --user $(gcloud config get-value account)\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.7.1/deploy/static/provider/cloud/deploy.yaml\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\ngcloud container clusters list\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Amazon Elastic Kubernetes Service (EKS)\nCreating an Amazon EKS cluster requires [cluster IAM roles](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). You can use the [`eksctl` CLI for Amazon EKS](https://eksctl.io/), which automatically creates the roles. `eksctl` [requires the AWS IAM Authenticator for Kubernetes](https://github.com/weaveworks/eksctl/blob/main/README.md#prerequisite), which Homebrew pulls in automatically on macOS.\n\n```shell\nbrew install eksctl awscli aws-iam-authenticator\naws configure\n\neksctl create cluster --name remote-dev \\\n    --region us-west-2 \\\n    --node-type m5.xlarge \\\n    --nodes 3 \\\n    --nodes-min=1 \\\n    --nodes-max=4 \\\n    --version=1.26 \\\n    --asg-access\n```\n\nThe eksctl command uses the [`--asg-access`, `--nodes-min/max` parameters for auto-scaling](https://eksctl.io/usage/autoscaling/). The autoscaler requires [additional configuration steps](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md); alternatively, [Karpenter is supported in Amazon EKS](https://karpenter.sh/docs/getting-started/getting-started-with-karpenter/). Review the [autoscaling documentation](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html), and verify that the [default Storage Class `gp2`](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html) fulfills the requirements. The Kubernetes configuration is automatically updated locally.\n\nInstall the [Nginx Ingress controller for EKS](https://kubernetes.github.io/ingress-nginx/deploy/#aws). Follow the documentation and run the following command to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/aws/deploy.yaml\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\neksctl get cluster --region us-west-2 --name remote-dev\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Azure Managed Kubernetes Service (AKS)\nInstall the [Azure CLI](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-cli).\n\n```shell\nbrew install azure-cli\n\naz login\n```\n\nReview the documentation for the [cluster autoscaler in AKS](https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler) and the [default Storage Class `managed-csi`](https://learn.microsoft.com/en-us/azure/aks/concepts-storage#storage-classes), create a new resource group, and create a new Kubernetes cluster. 
Download the Kubernetes configuration to continue with the `kubectl` commands.\n\n```shell\naz group create --name remote-dev-rg --location eastus\n\naz aks create \\\n--resource-group remote-dev-rg \\\n--name remote-dev \\\n--node-count 1 \\\n--vm-set-type VirtualMachineScaleSets \\\n--load-balancer-sku standard \\\n--enable-cluster-autoscaler \\\n--min-count 1 \\\n--max-count 3\n\naz aks get-credentials --resource-group remote-dev-rg --name remote-dev\n```\n\nInstall the [Nginx ingress controller in AKS](https://learn.microsoft.com/en-us/azure/aks/ingress-basic?tabs=azure-cli#basic-configuration). Follow the documentation and run the following commands to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nNAMESPACE=ingress-basic\n\nhelm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx\nhelm repo update\n\nhelm install ingress-nginx ingress-nginx/ingress-nginx \\\n  --create-namespace \\\n  --namespace $NAMESPACE \\\n  --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\"=/healthz\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\nkubectl get services --namespace ingress-basic -o wide -w ingress-nginx-controller\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Civo Cloud Kubernetes\nInstall and configure the [Civo CLI](https://www.civo.com/docs/kubernetes/create-a-cluster#creating-a-cluster-using-civo-cli), and create a Kubernetes cluster with 2 nodes, 4 CPUs, and 8 GB RAM.\n\n```shell\ncivo kubernetes create remote-dev -n 2 -s g4s.kube.large\n\ncivo kubernetes config remote-dev --save\nkubectl config use-context remote-dev\n```\n\nYou have full permissions on the cluster to create ClusterRoleBindings. The [default Storage Class](https://www.civo.com/docs/kubernetes/kubernetes-volumes#creating-a-persistent-volume-claim-pvc) is set to `civo-volume`.\n\nInstall the [Nginx Ingress controller using Helm](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start). Follow the documentation and run the following command to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nhelm upgrade --install ingress-nginx ingress-nginx \\\n  --repo https://kubernetes.github.io/ingress-nginx \\\n  --namespace ingress-nginx --create-namespace\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\ncivo kubernetes show remote-dev\n\nkubectl get services -A\n```\n\n### Set up infrastructure with self-managed Kubernetes\nThe process follows similar steps, requiring a user with permission to create `ClusterRoleBinding` resources. The [Nginx Ingress controller](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start) is the fastest path forward. 
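\n\nThe same Helm quick-start shown in the Civo section above works here as well, assuming `helm` is configured against your cluster:\n\n```shell\n# Install ingress-nginx into its own namespace (identical to the\n# quick-start command used for Civo Cloud above)\nhelm upgrade --install ingress-nginx ingress-nginx \\\n  --repo https://kubernetes.github.io/ingress-nginx \\\n  --namespace ingress-nginx --create-namespace\n```\n\n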
Once the cluster is ready, print the load balancer IP for the DNS records, and create/update the A/AAAA records for wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`) pointing to the load balancer IP.\n\n## Workspaces proxy installation into Kubernetes\n_After completing the Kubernetes cluster setup with one of your preferred providers, please continue with the next steps._\n\nAdd the Helm repository for the workspaces proxy (it uses the [Helm charts feature in the GitLab package registry](https://docs.gitlab.com/ee/user/packages/helm_repository/)).\n\n```shell\nhelm repo add gitlab-workspaces-proxy \\\n  https://gitlab.com/api/v4/projects/gitlab-org%2fremote-development%2fgitlab-workspaces-proxy/packages/helm/devel\n```\n\nInstall the `gitlab-workspaces-proxy` chart, and optionally [specify the most current chart version](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy/-/blob/main/helm/Chart.yaml). If you are using a different ingress controller than Nginx, you need to change the `ingress.className` key. Re-run the command when new TLS certificates need to be installed.\n\n```shell\nhelm repo update\n\nhelm upgrade --install gitlab-workspaces-proxy \\\n  gitlab-workspaces-proxy/gitlab-workspaces-proxy \\\n  --version 0.1.6 \\\n  --namespace=gitlab-workspaces \\\n  --create-namespace \\\n  --set=\"auth.client_id=${CLIENT_ID}\" \\\n  --set=\"auth.client_secret=${CLIENT_SECRET}\" \\\n  --set=\"auth.host=${GITLAB_URL}\" \\\n  --set=\"auth.redirect_uri=${REDIRECT_URI}\" \\\n  --set=\"auth.signing_key=${SIGNING_KEY}\" \\\n  --set=\"ingress.host.workspaceDomain=${GITLAB_WORKSPACES_PROXY_DOMAIN}\" \\\n  --set=\"ingress.host.wildcardDomain=${GITLAB_WORKSPACES_WILDCARD_DOMAIN}\" \\\n  --set=\"ingress.tls.workspaceDomainCert=$(cat ${WORKSPACES_DOMAIN_CERT})\" \\\n  --set=\"ingress.tls.workspaceDomainKey=$(cat ${WORKSPACES_DOMAIN_KEY})\" \\\n  --set=\"ingress.tls.wildcardDomainCert=$(cat ${WILDCARD_DOMAIN_CERT})\" \\\n  --set=\"ingress.tls.wildcardDomainKey=$(cat ${WILDCARD_DOMAIN_KEY})\" \\\n  --set=\"ingress.className=nginx\"\n```\n\nThe chart installs and configures the ingress automatically. You can verify the setup by getting the `Ingress` resource type:\n\n```shell\nkubectl get ingress -n gitlab-workspaces\n\nNAME                      CLASS   HOSTS                             ADDRESS   PORTS     AGE\ngitlab-workspaces-proxy   nginx   remote-dev.dev,*.remote-dev.dev             80, 443   9s\n```\n\n## Agent for Kubernetes installation\nCreate the agent configuration file in `.gitlab/agents/\u003Cagentname>/config.yaml`, add it to git, and push it to the repository. The `remote_development` key specifies the `dns_zone`, which must be set to the workspaces domain. Additionally, the integration needs to be enabled. The `observability` key intentionally configures [debug logging](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#debug-the-agent) for the first setup to troubleshoot faster. 
You can adjust the `logging` levels for production usage.\n\n```shell\nexport GL_AGENT_K8S=remote-dev-dev\n\nmkdir agent-kubernetes && cd agent-kubernetes\nmkdir -p .gitlab/agents/${GL_AGENT_K8S}/\n\ncat \u003C\u003CEOF >.gitlab/agents/${GL_AGENT_K8S}/config.yaml\nremote_development:\n    enabled: true\n    dns_zone: \"${GITLAB_WORKSPACES_PROXY_DOMAIN}\"\n\nobservability:\n  logging:\n    level: debug\n    grpc_level: warn\nEOF\n\ngit add .gitlab/agents/${GL_AGENT_K8S}/config.yaml\ngit commit -avm \"Add agent for Kubernetes configuration\"\n# adjust the URL to your GitLab server URL and project path\ngit remote add origin https://gitlab.example.com/remote-dev-workspaces/agent-kubernetes.git\n# will create a private project when https/PAT is used\ngit push\n```\n\nOpen the GitLab project in your browser, navigate into `Operate > Kubernetes Clusters`, and click the `Connect a new cluster (agent)` button. Select the agent from the configuration dropdown, and click `Register`. The form generates a ready-to-use Helm chart CLI command similar to the one below; replace `XXXXXXXXXXREPLACEME` with the actual token value.\n\n```shell\nhelm repo add gitlab https://charts.gitlab.io\nhelm repo update\nhelm upgrade --install remote-dev-dev gitlab/gitlab-agent \\\n    --namespace gitlab-agent-remote-dev-dev \\\n    --create-namespace \\\n    --set image.tag=v16.0.1 \\\n    --set config.token=XXXXXXXXXXREPLACEME \\\n    --set config.kasAddress=wss://kas.gitlab.com # Replace with your self-managed GitLab KAS instance URL if not using GitLab.com SaaS\n```\n\nRun the commands, and verify that the agent is connected in the `Operate > Kubernetes Clusters` overview. You can access the pod logs using the following command:\n\n```shell\n$ kubectl get ns\nNAME                          STATUS   AGE\ngitlab-agent-remote-dev-dev   Active   9d\ngitlab-workspaces             Active   22d\n...\n\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-$GL_AGENT_K8S\n```\n\n_Congrats! Your infrastructure setup for on-demand, cloud-based development environments is complete._\n\n## Workspaces creation\nAfter completing the infrastructure setup, verify that all components work together and that users can create workspaces. You can fork or import the [`example-python-http-simple` project](https://gitlab.com/gitlab-de/use-cases/remote-development/example-python-http-simple) into your GitLab group with access to the GitLab agent for Kubernetes to try it immediately. The project is a simple Flask-based Python web app that serves different HTTP routes. Alternatively, start with a new project and create a `.devfile.yaml` with the [example configuration](https://docs.gitlab.com/ee/user/workspace/#example-configurations).\n\nOptional: Inspect the [`.devfile.yaml`](https://docs.gitlab.com/ee/user/workspace/#devfile) file to learn about the configuration format. We will look into the `image` key later.\n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: py\n    attributes:\n      gl/inject-editor: true\n    container:\n      # Use a custom image that supports arbitrary user IDs.\n      # NOTE: THIS IMAGE IS NOT ACTIVELY MAINTAINED. 
DEMO USE CASES ONLY, DO NOT USE IN PRODUCTION.\n      # Source: https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id\n      image: registry.gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id:latest\n      memoryRequest: 1024M\n      memoryLimit: 2048M\n      cpuRequest: 500m\n      cpuLimit: 1000m\n      endpoints:\n        - name: http-python\n          targetPort: 8080\n```\n\n### Create the first workspaces\nNavigate to the `Your Work > Workspaces` menu and create a new workspace. Search for the project name, select the agent for Kubernetes, and create the workspace.\n\n![GitLab remote development workspaces, Python example](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python.png){: .shadow}\n\nOpen two terminals to follow the workspaces proxy and agent logs in the Kubernetes cluster.\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-workspaces-proxy -n gitlab-workspaces\n\n{\"level\":\"info\",\"ts\":1686331102.886607,\"caller\":\"server/server.go:74\",\"msg\":\"Starting proxy server...\"}\n{\"level\":\"info\",\"ts\":1686331133.146862,\"caller\":\"upstream/tracker.go:47\",\"msg\":\"New upstream added\",\"host\":\"8080-workspace-62029-5534214-2vxdxq.remote-dev.dev\",\"backend\":\"workspace-62029-5534214-2vxdxq.gl-rd-ns-62029-5534214-2vxdxq\",\"backend_port\":8080}\n2023/06/09 17:21:10 getHostnameFromState state=https://60001-workspace-62029-5534214-2vxdxq.remote-dev.dev/folder=/projects/demo-python-http-simple\n```\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-$GL_AGENT_K8S\n\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:19.839Z\",\"msg\":\"Applied event\",\"mod_name\":\"remote_development\",\"apply_event\":\"WaitEvent{ GroupName: \\\"wait-0\\\", Status: \\\"Pending\\\", Identifier: \\\"gl-rd-ns-62029-5534214-k66cjy_workspace-62029-5534214-k66cjy-gl-workspace-data__PersistentVolumeClaim\\\" }\",\"agent_id\":62029}\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:19.866Z\",\"msg\":\"Received update event\",\"mod_name\":\"remote_development\",\"workspace_namespace\":\"gl-rd-ns-62029-5534214-k66cjy\",\"workspace_name\":\"workspace-62029-5534214-k66cjy\",\"agent_id\":62029}\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:43.627Z\",\"msg\":\"Applied event\",\"mod_name\":\"remote_development\",\"apply_event\":\"WaitEvent{ GroupName: \\\"wait-0\\\", Status: \\\"Successful\\\", Identifier: \\\"gl-rd-ns-62029-5534214-k66cjy_workspace-62029-5534214-k66cjy_apps_Deployment\\\" }\",\"agent_id\":62029}\n```\n\nWait until the workspace is provisioned successfully, and click to open the HTTP URL, example format `https://60001-workspace-62029-5534214-2vxdxq.remote-dev.dev/?folder=%2Fprojects%2Fexample-python-http-simple`. The GitLab OAuth application will ask you for authorization.\n\n![GitLab OAuth provider app, example with the Developer Evangelism demo environment](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_oauth_app.png){: .shadow}\n\nYou can select the Web IDE menu, open a new terminal (`cmd shift p` and search for `terminal create`). 
More shortcuts and Web IDE usage are documented [here](https://docs.gitlab.com/ee/user/project/web_ide/).\n\n![GitLab remote development workspaces, Python example, create terminal](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_create_terminal.png){: .shadow}\n\nUsing the Python example project, try to run the `hello.py` file with the Python interpreter after changing the terminal to `bash` to access auto-completion and shell history. Type `pyth`, press tab, type `hel`, press tab, enter.\n\n```shell\n$ bash\n\n$ python hello.py\n```\n\nThe command will fail because the Python requirements still need to be installed. Let us fix that by running the following command:\n\n```shell\n$ pip install -r requirements.txt\n```\n\n![GitLab remote development workspaces, Python example, install requirements in the terminal](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_terminal_install_pip.png){: .shadow}\n\n**Note**: This example is intentionally kept simple, and does not use best practices with `pyenv` for managing Python environments. We will explore development environment templates in future blog posts.\n\nRun the Python application `hello.py` again to start the web server on port 8080.\n\n```shell\n$ python hello.py\n```\n\nYou can access the exposed port by changing the port number at the beginning of the URL to the exposed port `8080`. The `?folder` URL parameter can also be removed.\n\n```diff\n-https://60001-workspace-62029-5534214-kbtcmq.remote-dev.dev/?folder=/projects/example-python-http-simple\n+https://8080-workspace-62029-5534214-kbtcmq.remote-dev.dev/\n```\n\nThe URL is not publicly available and requires access through the GitLab OAuth session.\n\n![GitLab remote development workspaces, Python example, run webserver, access HTTP](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_terminal_run_webserver_access_http.png){: .shadow}\n\nModifying the workspace requires custom container images that support running with [arbitrary user IDs](https://docs.gitlab.com/ee/user/workspace/#arbitrary-user-ids). The example project uses a custom image that allows you to install Python dependencies and create build artifacts. It also lets you use the bash terminal shown above. Learn more about custom image creation in the next section.\n\n### Custom workspace container images\nCustom container images require support for [arbitrary user IDs](https://docs.gitlab.com/ee/user/workspace/#arbitrary-user-ids). You can build custom container images with [GitLab CI/CD](/solutions/continuous-integration/) and use the [GitLab container registry](https://docs.gitlab.com/ee/user/packages/container_registry/) to distribute the container images on the DevSecOps platform.\n\nWorkspaces run with arbitrary user IDs in the Kubernetes cluster containers and manage resource access with Linux group permissions. Existing container images may need to be changed and imported as the base image for new container images. The [following example](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile) uses the `python:3.11-slim-bullseye` image from Docker Hub as a base container image in the `FROM` key. 
The next steps create and set a home directory in `/home/gitlab-workspaces`, and manage user and group access to specified directories. Additionally, you can install more convenience tools and configurations into the image, for example the `git` package.\n\n[`Dockerfile`](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile)\n```\n# Example demo for a Python-based container image.\n# NOTE: THIS IMAGE IS NOT ACTIVELY MAINTAINED. DEMO USE CASES ONLY, DO NOT USE IN PRODUCTION.\n\nFROM python:3.11-slim-bullseye\n\n# User id for build time. Runtime will be an arbitrary random ID.\nRUN useradd -l -u 33333 -G sudo -md /home/gitlab-workspaces -s /bin/bash -p gitlab-workspaces gitlab-workspaces\n\nENV HOME=/home/gitlab-workspaces\n\nWORKDIR $HOME\n\nRUN mkdir -p /home/gitlab-workspaces && chgrp -R 0 /home && chmod -R g=u /etc/passwd /etc/group /home\n\n# TODO: Add more convenience tools into the user home directory, e.g. enable color prompt for the terminal, install pyenv to manage Python environments, etc.\nRUN apt update && \\\n    apt -y --no-install-recommends install git procps findutils htop vim curl wget && \\\n    rm -rf /var/lib/apt/lists/*\n\nUSER gitlab-workspaces\n```\n\n**As an exercise**, [fork the project](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id) and modify the package installation step in the `Dockerfile` file to install the `dnsutils` package on the Debian-based image to get access to the `dig` command.\n\n[`Dockerfile`](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile)\n```diff\n-RUN apt update && \\\n-    apt -y --no-install-recommends install git procps findutils htop vim curl wget && \\\n-    rm -rf /var/lib/apt/lists/*\n+RUN apt update && \\\n+    apt -y --no-install-recommends install git procps findutils htop vim curl wget dnsutils && \\\n+    rm -rf /var/lib/apt/lists/*\n```\n\n[Build the container image](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html) with your preferred CI/CD workflow. On GitLab.com SaaS, you can include the `Docker.gitlab-ci.yml` template, which takes care of building the image.\n\n```yaml\ninclude:\n    - template: Docker.gitlab-ci.yml\n```\n\nWhen building the container images manually, use Linux and `amd64` as platform architecture [until multi-architecture support is available for running workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10594). Also, review the [optimizing images guide in the documentation](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html#optimize-docker-images) when creating custom container images to optimize size and build times.\n\nNavigate into `Deploy > Container Registry` in the GitLab UI and copy the image URL from the tagged image. 
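\n\nOptionally, verify that the tagged image can be pulled before wiring it into the devfile. A quick check (the registry path below is a placeholder for the URL you just copied):\n\n```shell\n# Pull the freshly built image for the amd64 platform to confirm the\n# tag exists and is reachable (placeholder registry path)\ndocker pull --platform linux/amd64 registry.gitlab.example.com/remote-dev-workspaces/python-remote-dev-workspaces-user-id:latest\n```\n\n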
Open the `.devfile.yaml` file in the forked GitLab project `example-python-http-simple`, and change the `image` path to the newly built image URL.\n\n[`.devfile.yaml`](https://gitlab.com/gitlab-de/use-cases/remote-development/example-python-http-simple/-/blob/main/.devfile.yaml)\n```diff\n-      image: registry.gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id:latest\n+      image: registry.gitlab.example.com/remote-dev-workspaces/python-remote-dev-workspaces-user-id:latest\n```\n\nNavigate into `Your Work > Workspaces`, create a new workspace for the project, and try to execute the `dig` command to query the IPv6 address of GitLab.com (or any other internal domain).\n\n```shell\n$ dig +short gitlab.com AAAA\n```\n\nThe custom container image project is located [here](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/).\n\n## Tips\nThis blog post's setup steps with environment variables are easy to follow. For production usage, use automation to manage your environment with Terraform, Ansible, etc.\n\n- Terraform: [Provision a GKE Cluster (Google Cloud)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/gke), [Provision an EKS Cluster (AWS)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/eks), [Provision an AKS Cluster (Azure)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/aks), [Deploy Applications with the Helm Provider](https://developer.hashicorp.com/terraform/tutorials/kubernetes/helm-provider)\n- Ansible: [google.cloud.gcp_container_cluster module](https://docs.ansible.com/ansible/latest/collections/google/cloud/gcp_container_cluster_module.html), [community.aws.eks_cluster module](https://docs.ansible.com/ansible/latest/collections/community/aws/eks_cluster_module.html), [azure.azcollection.azure_rm_aks module](https://docs.ansible.com/ansible/latest/collections/azure/azcollection/azure_rm_aks_module.html), [kubernetes.core collection](https://docs.ansible.com/ansible/latest/collections/kubernetes/core/index.html#plugin-index)\n\n### Certificate management\nThe workspaces domain requires a valid TLS certificate. The examples above used `certbot` with Let's Encrypt, which requires certificate renewal after three months. Depending on your corporate requirements, you may need to create TLS certificates signed by the corporate CA identity and manage the certificates. Alternatively, you can look into solutions like [cert-manager for Kubernetes](https://cert-manager.io/docs/getting-started/) that help renew certificates automatically.\n\nDo not forget to add TLS certificate validity monitoring to avoid unforeseen errors. The [blackbox exporter for Prometheus](https://github.com/prometheus/blackbox_exporter) can help monitor TLS certificate expiry and send alerts.\n\n### Troubleshooting\nHere are a few tips for troubleshooting connections and inspecting the cluster resources.\n\n#### Verify the connections\nTry to connect to the workspaces domain to see whether the Kubernetes Ingress controller responds to HTTP requests.\n\n```shell\n$ curl -vL ${GITLAB_WORKSPACES_PROXY_DOMAIN}\n```\n\nInspect the logs of the proxy deployment to follow connection requests. 
Since the proxy requires an authorization token sent via the OAuth app, an HTTP 400 error is expected for unauthenticated curl requests.\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-workspaces-proxy -n gitlab-workspaces\n```\n\nCheck if the TLS certificate is valid. You can also use `sslscan` and other tools.\n\n```shell\n$ openssl s_client -connect ${GITLAB_WORKSPACES_PROXY_DOMAIN}:443\n\n$ sslscan ${GITLAB_WORKSPACES_PROXY_DOMAIN}\n```\n\n[Debug the agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#debug-the-agent) and inspect the pod logs.\n\n```shell\n$ kubectl get ns\n\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-\u003CNAMESPACENAME>\n```\n\n#### Workspaces cannot be created even if the agent is connected\nWhen the workspace deployment spinner keeps spinning and nothing happens, try restarting the workspaces proxy and agent for Kubernetes. This is a known problem, tracked [in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/414399#note_1426652421).\n\n```shell\n$ kubectl rollout restart deployment -n gitlab-workspaces\n\n$ kubectl rollout restart deployment -n gitlab-agent-$GL_AGENT_K8S\n```\n\nIf the agent for Kubernetes remains unresponsive, consider a complete reinstall. First, navigate in the GitLab UI to `Operate > Kubernetes Clusters` and [delete the agent](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#remove-an-agent-through-the-gitlab-ui). Next, use the following commands to delete the Helm release from the cluster, and run the installation command generated from the UI again.\n\n```shell\nkubectl get ns\nhelm list -A\n\nexport RELEASENAME=xxx\nexport NAMESPACENAME=xxx\nexport TOKEN=XXXXXXXXXXREPLACEME\nhelm uninstall ${RELEASENAME} -n gitlab-agent-${NAMESPACENAME}\n\nhelm repo add gitlab https://charts.gitlab.io\nhelm repo update\n\nhelm upgrade --install ${RELEASENAME} gitlab/gitlab-agent \\\n    --namespace gitlab-agent-${NAMESPACENAME} \\\n    --create-namespace \\\n    --set image.tag=v16.1.2 \\\n    --set config.token=${TOKEN} \\\n    --set config.kasAddress=wss://kas.gitlab.com # Replace with your self-managed GitLab KAS instance URL if not using GitLab.com SaaS\n```\n\nExample: `helm uninstall remote-dev-dev -n gitlab-agent-remote-dev-dev`\n\n#### Cannot modify workspace using custom images\nIf you cannot modify the workspace, open a new terminal and check the user ID and its groups.\n\n```shell\n$ id\n```\n\nInspect the `.devfile.yaml` file in the project and extract the `image` attribute to test the container image in use. You can use a container CLI, for example `docker`, to run the container with a different user ID. Note: You can use any user ID to test the behavior.\n\nTip: Use the `grep` and `cut` commands to extract the image attribute URL from the `.devfile.yaml`.\n\n```shell\n$ cat .devfile.yaml | grep image: | cut -f2- -d':'\n```\n\nRun the following command to execute the `id` command in the container, and print the user information.\n\n```shell\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname id\n```\n\nTry to modify the workspace by running the command `echo 'Hi' >> ~/example.md`. This can fail with a permission error. Note the `sh -c` wrapper, which ensures the redirection happens inside the container rather than in your host shell.\n\n```shell\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname sh -c \"echo 'Hi' >> ~/example.md\"\n```\n\nIf the above command failed, the Linux user group does not have enough permissions to modify the file. 
You can view the permissions using the `ls` command.\n\n```shell\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname ls -lart ~/\n```\n\n### Contribute\nThe [remote development developer documentation](https://gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs) provides insights into the [architecture blueprint](https://docs.gitlab.com/ee/architecture/blueprints/remote_development/) and how to set up a local development environment to [start contributing](/community/contribute/). In the future, we will be able to use remote development workspaces to develop remote development workspaces.\n\n## Share your feedback\nIn this blog post, you have learned how to manage the infrastructure for remote development workspaces, how to create your first workspace, and picked up tips on custom workspace images and troubleshooting. Using the same development environment across organizations and communities, developers can focus on writing code and get fast preview feedback (e.g., by running a web server that can be accessed externally in the remote workspace). Providing the same reproducible environment also helps open source contributors reproduce bugs and provide feedback efficiently. They can use the same best practices as upstream maintainers.\n\nDevelopers and DevOps engineers will be using the Web IDE in workspaces. Later, being able to [connect their desktop client to workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10478), they can take advantage of even more efficiency with the [most comprehensive AI-powered DevSecOps platform](/gitlab-duo/): Code suggestions and more AI-powered workflows are just one fingertip away.\n\nWhat will your teams build with remote development workspaces? Please share your experiences in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031), blog about your setup, and join our [community forum](https://forum.gitlab.com/) for more discussions.\n\nCover image by [Nick Karvounis](https://unsplash.com/@nickkarvounis) on [Unsplash](https://unsplash.com/photos/SmIM3m8f3Pw)",[480,1508,894,727,873],"careers",{"slug":1510,"featured":6,"template":678},"set-up-infrastructure-for-cloud-development-environments","content:en-us:blog:set-up-infrastructure-for-cloud-development-environments.yml","Set Up Infrastructure For Cloud Development Environments","en-us/blog/set-up-infrastructure-for-cloud-development-environments.yml","en-us/blog/set-up-infrastructure-for-cloud-development-environments",{"_path":1516,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1517,"content":1523,"config":1528,"_id":1530,"_type":16,"title":1531,"_source":17,"_file":1532,"_stem":1533,"_extension":20},"/en-us/blog/introducing-ci-components",{"title":1518,"description":1519,"ogTitle":1518,"ogDescription":1519,"noIndex":6,"ogImage":1520,"ogUrl":1521,"ogSiteName":692,"ogType":693,"canonicalUrls":1521,"schema":1522},"Introducing CI/CD components and how to use them in GitLab","Learn the main benefits for using CI/CD components in your CI/CD pipelines and how to achieve them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667676/Blog/Hero%20Images/buildingblocks.jpg","https://about.gitlab.com/blog/introducing-ci-components","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing CI/CD components and how to use them in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": 
\"2023-07-10\",\n      }",{"title":1518,"description":1519,"authors":1524,"heroImage":1520,"date":1525,"body":1526,"category":14,"tags":1527},[1020],"2023-07-10","\nWelcome to the third blog in our series on GitLab's CI/CD components! If you haven't already, we encourage you to read \"[How to build reusable CI/CD templates](https://about.gitlab.com/blog/how-to-build-reusable-ci-templates/)\" and \"[Use inputs in includable files](https://about.gitlab.com/blog/use-inputs-in-includable-files/)\" to gain a comprehensive understanding of these exciting new capabilities. In this blog post, we'll dive in and explore the power of GitLab's CI/CD components in revolutionizing CI/CD workflows. We'll also provide a glimpse into the future of GitLab's CI/CD ecosystem, including the upcoming release of the [CI/CD catalog](https://docs.gitlab.com/ee/architecture/blueprints/ci_pipeline_components/), a framework containing a collection of these components. With these moves, GitLab is taking a significant step towards streamlining pipeline configurations and enhancing reusability.\n\n### CI/CD components\nIn [GitLab 16.1](https://about.gitlab.com/releases/2023/06/22/gitlab-16-1-released/), an exciting experimental feature called CI/CD components was introduced. CI/CD components are reusable, single-purpose building blocks that abstract away pipeline configuration units.\n\nBy leveraging the power of CI/CD components, users can unlock several key benefits:\n1. **Reusability and abstraction.** CI/CD components allow pipelines to be assembled using abstractions instead of defining all the details in one place. With components encapsulating implementation details, developers can focus on composing pipelines using pre-built, reusable blocks. This approach promotes modularity, code reusability, and simplifies pipeline maintenance.\n2. **Flexibility with input.** Components support input parameters, enabling customization based on pipeline contexts, making them adaptable and reusable across various pipeline stages. Developers gain the ability to build a dynamic CI/CD catalog that is tagged and versioned, providing better control and compatibility. Developers can reference specific component versions, ensuring stability and reproducibility. By leveraging version tags, teams can maintain consistency in their pipelines while easily upgrading to newer versions when desired.\n4. **High-quality standards through testing.** Testing components as part of the development workflow to ensure quality maintains high standards is strongly recommended. By incorporating testing into the CI/CD process, developers can verify the reliability and functionality of components, identify and fix issues early on, and deliver more robust and dependable pipelines.\n5. **The CI/CD catalog.** A centralized repository of components, the CI/CD catalog is set to be released soon, and will act as a treasure trove of components curated to cover a wide range of use cases. This centralized repository offers developers a one-stop shop for discovering, integrating, and sharing components. 
In the previous blog posts, we discussed the main benefits for the first two points (which are also available with CI/CD templates), but now let's dig deeper into components and how they could revolutionize the way you construct your pipelines.\n\n### Testing a CI/CD component\nAs software development continues to evolve, ensuring the reliability and quality of code components becomes increasingly vital.\n\nOne of the main benefits of using components is the ability to thoroughly test components before software is officially released, enabling a more robust and streamlined development process. In our context, a released component is versioned and will follow a structured syntax, allowing for seamless integration within pipelines.\n\n```yaml\ninclude:\n  - component: /path/to/project@\u003Cversion>\n```\nOne of the unique benefits of our CI/CD components is the flexibility they offer. DevSecOps teams can opt in for an \"unofficial\" release by appending `@commit_SHA`, allowing them to experiment and iterate on their code before making it an official release.\n\n```yaml\ninclude:\n  - component: /path/to/project@\u003Ccommit_SHA>\n```\nTo make a component an official release, users must tag it, essentially creating a versioned snapshot. The tagged release will then be made available in our comprehensive CI/CD catalog (launching soon), providing users with easy access to a range of thoroughly tested and approved components. To ensure the stability and reliability of your CI/CD components, it is crucial to thoroughly test them. DevSecOps teams can leverage the power of our pipeline by utilizing the commit_SHA identifier to run comprehensive tests. If the pipeline successfully passes all tests, they can proceed to tag the component, signifying its readiness for release.\n\nBy configuring a release job based on the tagged version, DevSecOps teams can confidently incorporate the official component into their projects, knowing that it has undergone testing and validation. To learn more about how to test components, you can check out our [documentation](https://docs.gitlab.com/ee/ci/components/#test-a-component) or watch this walkthrough video:\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"1870\" height=\"937\" src=\"https://www.youtube.com/embed/Vw8-ce8LNBs\" title=\"\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\n### Versioning and tagging\nAs mentioned in the previous section, DevSecOps teams can leverage the `@version` or the `@commit_SHA` to refer to a component in their pipeline. Another option to refer to a component is by leveraging `@latest`.\n\n```yaml\ninclude:\n  - component: /path/to/project@latest\n```\nThis will use the latest official (tagged) version of the component. When combined with reliable tests, this guarantees that the components used in your pipeline are always tested and verified.\n\n### On the horizon: CI/CD catalog\nOne of the biggest benefits of using components is yet to be seen and will be available with the launch of our CI/CD catalog. 
The catalog will allow users to search, find, and understand how to use components that are available across their organization, setting a framework for them to collaborate on pipeline constructs so that they can be evolved and improved over time. Stay tuned!\n\n### Dogfooding components \nAt GitLab, we believe in [dogfooding our own product](https://handbook.gitlab.com/handbook/values/#dogfooding). To demonstrate the power and practicality of CI/CD components, we have converted some of our GitLab templates into components and asked our internal teams to use them and provide additional feedback. By doing so, we are actively using and testing components in real-world scenarios, uncovering insights, and continuously improving their functionality. In this [group](https://gitlab.com/gitlab-components), you can find the Code Quality, Container Scanning, and SAST templates we converted into CI/CD components.\n\nThrough this dogfooding process, we are not only validating the effectiveness of CI/CD components but also gaining invaluable experience and feedback to refine and enhance our offering. It's a testament to our commitment to providing practical and reliable solutions for our users. You can view the ongoing discussions between the internal teams in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/390656).\n\n### Call to action\nThe CI/CD component catalog is currently in an experimental phase, so we advise against using it in a production environment at this time. There is a high probability of changes being made to it. We are currently working on reorganizing the folder structure of the components to prepare for the launch of the CI/CD catalog. You can stay updated on our progress by following our [epic](https://gitlab.com/groups/gitlab-org/-/epics/10728), or let us know what you think in this dedicated [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/407556).\n\n### What's next\nGitLab's CI/CD component catalog and its accompanying CI/CD components feature are ushering in a new era of streamlined pipeline configurations. By embracing reusability, abstraction, input flexibility, versioning, and a centralized catalog, developers can build efficient, adaptable, and maintainable CI/CD workflows. The CI/CD component catalog empowers teams to accelerate their software delivery, collaborate effectively, and leverage the full potential of GitLab's CI/CD capabilities.\n\nStay tuned for the launch of the CI/CD catalog, where you'll gain access to an extensive collection of components, unlocking new possibilities for your pipelines. 
GitLab remains committed to empowering developers with cutting-edge tools, driving innovation, and simplifying the complexities of modern software development.\n\n> Learn more about the CI/CD Catalog and components:\n>  \n> - [CI/CD Catalog goes GA: No more building pipelines from scratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n> \n> - [A CI/CD component builder's journey](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n>\n> - [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n>\n> - [Documentation: CI/CD components and CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/)\n> \n\nCover image by [Alexander Grey](https://www.pexels.com/photo/assorted-color-bricks-1148496/) on [Pexels](https://www.pexels.com).\n{: .note}\n",[726,110,749],{"slug":1529,"featured":6,"template":678},"introducing-ci-components","content:en-us:blog:introducing-ci-components.yml","Introducing Ci Components","en-us/blog/introducing-ci-components.yml","en-us/blog/introducing-ci-components",{"_path":1535,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1536,"content":1542,"config":1548,"_id":1550,"_type":16,"title":1551,"_source":17,"_file":1552,"_stem":1553,"_extension":20},"/en-us/blog/how-to-automate-creation-of-runners",{"title":1537,"description":1538,"ogTitle":1537,"ogDescription":1538,"noIndex":6,"ogImage":1539,"ogUrl":1540,"ogSiteName":692,"ogType":693,"canonicalUrls":1540,"schema":1541},"How to automate the creation of GitLab Runners","Follow this step-by-step guide for automating runner setup using new runner creation workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664087/Blog/Hero%20Images/tanukicover.jpg","https://about.gitlab.com/blog/how-to-automate-creation-of-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate the creation of GitLab Runners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2023-07-06\",\n      }",{"title":1537,"description":1538,"authors":1543,"heroImage":1539,"date":1545,"body":1546,"category":14,"tags":1547},[1544],"Darren Eastman","2023-07-06","\n\nAutomating the creation of GitLab Runners is an essential tactic in optimizing the operations and management of a runner fleet. Since announcing the [deprecation and planned removal of the legacy runner registration token](https://docs.gitlab.com/ee/architecture/blueprints/runner_tokens/#next-gitlab-runner-token-architecture) last year, customers and the user community have raised questions about the impact of the new workflow on any automation they rely on for creating and registering runners. This is a step-by-step guide for automating runner setup using the new runner creation workflows as depicted in the sequence diagram.\n\n![GitLab Runner create - sequence diagram](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/runner_create_sequence_diagram.png){: .shadow}\n\n## New terminology and concepts\nBefore we dive into the automation steps, let’s first review a few new concepts introduced with the runner creation process and how that differs from the registration token-based method. With the `registration token` method, a `registration token` is available for the instance, for each group, and for each project. 
Therefore, in a large GitLab installation, with many groups, sub-groups, and projects, you can have hundreds of registration tokens that any authorized user can use to connect a runner. There are two steps to authorizing a runner (the application that you install on a target computing platform) to a GitLab instance:\n1. Retrieve a registration token.\n2. Run the register command in the runner application using the previously retrieved registration token.\n\nThe workflow images below depict the runner setup steps using the registration token compared with the new runner creation process.\n\n![GitLab Runner registration workflows](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/runner_registration_workflows.png){: .shadow}\n\n### Reusable runner configurations\nNow, in the registration token method, if you authenticated multiple runners using the same registration token (a valid use case), each runner entity would be visible in the UI in a separate row in the list view. The new creation method introduces the concept of a reusable runner configuration. For example, if you have to deploy multiple runners at the instance level, each with the same configuration (executor type, tags, etc.), you simply create a runner and configuration **once**, then register each individual runner with the same authentication token that you retrieved from the first runner creation. Each of these runners is now displayed in the UI in a nested hierarchy.\n\n![Runner detailed view with shared configurations](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/runner_detail_shared_configs.png){: .shadow}\n\nWe heard from many of you that your Runners view was cluttered because each runner created received its own row in the table, even if it had the exact same configuration as 100 others. With this change, our intent is to ensure that you have the flexibility you need to configure a runner fleet at scale while ensuring that you can still easily understand and manage the fleet in the GitLab Runners view. We understand that this is a paradigm shift that may take some getting used to.\n\n## Automation steps for creating a runner\nHere are the automation steps to create a runner.\n\n### Step 1: Create an access token\nYou will first need to create an access token. A [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) for an administrator account will allow you to create runners at the instance, group, and project levels.\n\nIf you only need to create a group or project runner, then it is best to use a group access token or project access token, respectively. For a group or project, navigate to `Settings / Access Tokens` and create a token. You must specify a name, the token expiration date, role, and scope. For the role, select `Owner`; for the scopes, select `create_runner`.\n\nNote: The access token is only visible once in the UI. 
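\nIf you want to script this step as well, group and project access tokens can also be created through the REST API. A minimal sketch for a group token, assuming an Owner-level personal access token and illustrative values for the group ID, token name, and expiry date:\n\n```shell\ncurl -sX POST \"https://gitlab.example.com/api/v4/groups/\u003Cgroup_id>/access_tokens\" \\\n     --header \"PRIVATE-TOKEN: \u003Cyour_personal_access_token>\" \\\n     --data \"name=runner-creation-token\" \\\n     --data \"scopes[]=create_runner\" \\\n     --data \"access_level=50\" \\\n     --data \"expires_at=2024-01-31\"\n```\n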
You will need to store this token in a secure location - for example, a secrets management solution such as [HashiCorp Vault](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/) or the [Keeper Secrets Manager Terraform plugin](https://docs.keeper.io/secrets-manager/secrets-manager/integrations/terraform).\n\n![GitLab Runner registration workflows](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/project_access_token.png){: .shadow}\n\n### Step 2: Use the access token to create a runner in the GitLab instance\nNow that you have an access token scoped to the instance, group, or project, the next step is to use that token to create a runner automatically. In this example, we will simply invoke a POST REST endpoint in a terminal using curl.\n\n```shell\ncurl -sX POST https://gitlab.example.com/api/v4/user/runners --data runner_type=group_type --data \"group_id=\u003Ctarget_group_id>\" --data \"description=software-eng-docker-builds-runner\" --data \"tag_list=\u003Cyour comma-separated tags>\" --header \"PRIVATE-TOKEN: \u003Cyour_access_token>\"\n```\n\nOnce this step is complete, the newly created runner configuration is visible in the GitLab UI. As the actual runner has not yet been configured, the status displayed is `Never contacted`.\n\nThe API will return a message with the following fields: `id`, `token`, and `token_expires_at`. You must save the value for the `token` as it will only be displayed once. \n\nAs mentioned above, a critical point to note in the new runner creation is that you can reuse the runner token value to register multiple runners. If you choose to do that, runners created with the same token will be grouped in the Runners list. Whichever runner contacted GitLab most recently will be the one whose unique data (IP address, version, last contact time, and status) displays in the list. You can still view all the runners in that group _and_ compare all of their unique data by going to the details page for that runner. Each runner in the group is uniquely identified by its `system_id`.\n\nAt this point, you might ask yourself, what’s the difference between this new workflow and the workflow that relies on the registration token? The benefits are:\n1. You can now quickly identify the user that created a runner configuration. Not only does this add a layer of security compared to the old method, but it also simplifies troubleshooting runner performance issues, especially when your fleet expands.\n1. Only the creator of the runner or administrator(s) can edit crucial configuration details like tags, the ability to run untagged jobs, the setting to lock to only run jobs in the current projects it is shared with, and more.\n\n## Automation of runner install and registration\nWith the runner configuration creation steps completed, you now have a runner or runners configured in your GitLab instance and valid runner tokens that you can use to register a runner. You can manually install the runner application to a target compute host or automate the runner application installation. If you plan to host the runner on a public cloud virtual machine instance – for example, [Google Cloud Compute Engine](https://cloud.google.com/compute/docs/instances) – then a good [example pattern](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1932#note_1172713979) provided by one of our customers for automating the runner install and registration process is as follows:\n1. 
Use [Terraform infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/) to install the runner application to a virtual machine hosted on GCP.\n1. Use the [GCP Terraform provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance) and specifically the `metadata` key to automatically add the runner authentication token to the runner configuration file on the newly created GCP virtual machine.\n1. Register the newly installed runner with the target GitLab instance using a [cloud-init](https://cloudinit.readthedocs.io/en/latest/index.html#) script populated from the GCP Terraform provider.\n\n**Example cloud-init script**\n\n```shell\n#!/bin/bash\napt update\n\ncurl -L \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh\" | bash\nGL_NAME=$(curl 169.254.169.254/computeMetadata/v1/instance/name -H \"Metadata-Flavor: Google\")\nGL_EXECUTOR=$(curl 169.254.169.254/computeMetadata/v1/instance/attributes/gl_executor -H \"Metadata-Flavor: Google\")\n# Fetch the runner authentication token from the instance metadata. The\n# attribute name 'gl_runner_token' is illustrative; it must match the metadata\n# key you populate via the Terraform provider.\nRUNNER_TOKEN=$(curl 169.254.169.254/computeMetadata/v1/instance/attributes/gl_runner_token -H \"Metadata-Flavor: Google\")\napt install -y gitlab-runner\ngitlab-runner register --non-interactive --name=\"$GL_NAME\" --url=\"https://gitlab.com\" --token=\"$RUNNER_TOKEN\" --request-concurrency=\"12\" --executor=\"$GL_EXECUTOR\" --docker-image=\"alpine:latest\"\nsystemctl restart gitlab-runner\n```\n\n## What's next?\nSo there you have it, an overview of how to automate runner creation, installation, and registration. To summarize in three simple steps:\n1. Use the API to create a runner token and configuration.\n1. Store the retrieved authentication token in a secrets management solution.\n1. Use infrastructure as code to install the runner application on a target compute host.\n\nOur long-term vision is to directly incorporate this automation lifecycle into the product to simplify your day-to-day runner fleet management operations.\n",[726,727,704],{"slug":1549,"featured":6,"template":678},"how-to-automate-creation-of-runners","content:en-us:blog:how-to-automate-creation-of-runners.yml","How To Automate Creation Of Runners","en-us/blog/how-to-automate-creation-of-runners.yml","en-us/blog/how-to-automate-creation-of-runners",{"_path":1555,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1556,"content":1562,"config":1568,"_id":1570,"_type":16,"title":1571,"_source":17,"_file":1572,"_stem":1573,"_extension":20},"/en-us/blog/set-up-flux-for-gitops-on-openshift",{"title":1557,"description":1558,"ogTitle":1557,"ogDescription":1558,"noIndex":6,"ogImage":1559,"ogUrl":1560,"ogSiteName":692,"ogType":693,"canonicalUrls":1560,"schema":1561},"Set up Flux for GitOps to deploy workloads on OpenShift","Learn how to set up a sample project, complete a bootstrap Flux installation, and authenticate your installation with a project deploy token.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682825/Blog/Hero%20Images/genericworkflow.jpg","https://about.gitlab.com/blog/set-up-flux-for-gitops-on-openshift","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Set up Flux for GitOps to deploy workloads on OpenShift\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bart Zhang\"}],\n        \"datePublished\": \"2023-07-05\",\n      }",{"title":1557,"description":1558,"authors":1563,"heroImage":1559,"date":1565,"body":1566,"category":14,"tags":1567},[1564],"Bart Zhang","2023-07-05","\n\nIn February, we announced that [Flux CD would be our recommended approach to do GitOps with 
GitLab](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/). This tutorial explains how to set up GitLab and Flux to deploy workloads on Red Hat OpenShift. You’ll set up a sample project, complete a bootstrap Flux installation, and authenticate your installation with a project deploy token. By the end of this tutorial, you should be able to deploy an example NGINX workload to OpenShift from a GitLab repo via Flux.\n\nYou can find the fully configured tutorial project in [this GitLab repository](https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux). It works in conjunction with [this repository](https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests), which contains the example OpenShift manifest. \n\n### To set up Flux for GitOps:\n1. Create a personal access token\n2. Create the Flux repository\n3. Create the OpenShift manifest repository\n4. Configure Flux to sync your manifests\n5. Verify your configuration\n\n### Prerequisites:\nYou must have an OpenShift cluster running. Cluster-admin privileges are required to install Flux on OpenShift; Flux can be installed either via OperatorHub or via the CLI.\n\nWhen installing Flux with the CLI, you need to set the nonroot SCC for all controllers in the flux-system namespace like this:\n\n```\nNS=\"flux-system\"\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:kustomize-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:helm-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:source-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:notification-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:image-automation-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:image-reflector-controller\n```\nExpected output:\n```\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"kustomize-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"helm-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"source-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"notification-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"image-automation-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"image-reflector-controller\"\n```\n\nAlso, prior to bootstrapping the cluster, you'll need to [patch your Kustomization](https://access.redhat.com/documentation/en-us/openshift_container_platform/4.8/html/security_and_compliance/seccomp-profiles) to remove the seccomp profile and set `runAsUser` to the UID provided by the images, to prevent OpenShift from altering the user expected by the Flux controllers. The patch is shown in the next section.\n\nYou’ll need to create a Git repository and clone it locally. 
I chose to create [the web-app-manifests repository](https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests) to store my manifest file once it is created through the following steps.\n\nCreate the file structure required by bootstrap using the following command:\n\n```\ngit clone https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux/\ncd flux\nmkdir -p clusters/my-cluster/flux-system\ntouch clusters/my-cluster/flux-system/gotk-components.yaml \\\n    clusters/my-cluster/flux-system/gotk-sync.yaml \\\n    clusters/my-cluster/flux-system/kustomization.yaml\n```\n\nAdd the following YAML snippet and its patches section to flux/clusters/my-cluster/flux-system/kustomization.yaml:\n\n```\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n  - gotk-components.yaml\n  - gotk-sync.yaml\npatches:\n  - patch: |\n      apiVersion: apps/v1\n      kind: Deployment\n      metadata:\n        name: all\n      spec:\n        template:\n          spec:\n            containers:\n              - name: manager\n                securityContext:\n                  runAsUser: 65534\n                  seccompProfile:\n                    $patch: delete\n    target:\n      kind: Deployment\n      labelSelector: app.kubernetes.io/part-of=flux\n```\n\nCommit and push the changes to the main branch:\n\n```\ncd ~/flux\ngit add -A && git commit -m \"init flux for openshift\" && git push\n```\n\n### Create a personal access token\n\nTo authenticate with the Flux CLI, you must create a GitLab personal access token ([PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)) with the api scope:\n1. In the upper-right corner, select your avatar.\n2. Select Edit profile.\n3. On the left sidebar, select Access Tokens.\n4. Enter a name and expiry date for the token.\n5. Select the api scope.\n6. Select Create personal access token.\n7. Copy the new token to your clipboard.\n\nNote: You can also use a project or group access token with the api scope.\n\n### Create the Flux repository\nCreate a Git repository, install Flux, and authenticate Flux with your repo in Red Hat OpenShift:\n1. Make sure you are logged in as an OpenShift user in your CLI to access your cluster. The `oc login` command is useful here.\n2. [Install the Flux CLI](https://fluxcd.io/flux/installation/#bootstrap). You must install Flux v2 or higher. On macOS, run `brew install fluxcd/tap/flux`. Check your flux version with `flux -v`. Mine is `flux version 2.0.0-rc.1`.\n3. In GitLab, create a new empty project called `flux`. I chose to use [the repository in this readme](https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux/).\n4. From your shell, export a GITLAB_TOKEN environment variable with the value of your personal access token. For example, `export GITLAB_TOKEN=\u003Cpersonal-access-token>`.\n5. Run the bootstrap command. The exact command depends on whether you are creating the Flux repository under a GitLab user, group, or subgroup. For more information, see the Flux bootstrap documentation.\n
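\nBefore running the bootstrap, you can optionally use Flux's built-in pre-flight check to verify that your CLI and cluster meet the prerequisites:\n\n```\nflux check --pre\n```\n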
In this tutorial, you’re working with a public project in a subgroup. The bootstrap command looks like this:\n\n```\ncd ~/flux\nflux bootstrap gitlab \\\n  --owner=gitlab-partner-demos/red-hat-demos \\\n  --repository=flux \\\n  --branch=master \\\n  --path=clusters/my-cluster \\\n  --token-auth\n```\nExpected output:\n```\n► connecting to https://gitlab.com\n► cloning branch \"master\" from Git repository \"https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux.git\"\n✔ cloned repository\n► generating component manifests\n✔ generated component manifests\n✔ component manifests are up to date\n► installing components in \"flux-system\" namespace\n✔ installed components\n✔ reconciled components\n► determining if source secret \"flux-system/flux-system\" exists\n✔ source secret up to date\n► generating sync manifests\n✔ generated sync manifests\n✔ sync manifests are up to date\n► applying sync manifests\n✔ reconciled sync configuration\n◎ waiting for Kustomization \"flux-system/flux-system\" to be reconciled\n```\n\nThis command installs the Flux agent on the OpenShift cluster and configures it to manage itself from the `flux` repository. The command also automatically creates the project deploy token required to access the `flux` repository.\n\nGreat work! You now have a repository bootstrapped with a Flux configuration. Any updates to your repository are automatically synced to the cluster.\n\n### Create the OpenShift manifest repository\nNext, create a repository for your Flux manifest files. These are state files that the Flux agent uses to track the currently running configuration. I chose to use the [web-app-manifests](https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests) project to track my manifest files.\n1. In GitLab, create a new repository called `web-app-manifests`.\n1. Add a file to web-app-manifests named `nginx-deployment.yaml` with the following contents:\n\n```\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-deployment\n  labels:\n    app: nginx\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: nginx\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n      - name: nginx-unprivileged\n        image: nginxinc/nginx-unprivileged:latest\n        ports:\n        - containerPort: 80\n```\n\nIn the new `web-app-manifests` repository, create a [GitLab deploy token](https://docs.gitlab.com/ee/user/project/deploy_tokens/) with only the `read_repository` scope.\n\nStore your deploy token username and password somewhere safe. I used environment variables to save mine:\n\n```\nexport GITLAB_DEPLOY_TOKEN_USER=\u003Cmy-gitlab-deployment-token-username>\nexport GITLAB_DEPLOY_TOKEN_PASS=\u003Cmy-gitlab-deployment-token-password>\nenv |grep GITLAB_DEPLOY_TOKEN\n```\nExpected output:\n```\nGITLAB_DEPLOY_TOKEN_USER=myGitLabUserName\nGITLAB_DEPLOY_TOKEN_PASS=MySecretToken\n```\n
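\nIf you prefer to automate this step, the deploy token can also be created through the REST API rather than the UI. A minimal sketch, assuming a personal access token with the api scope and an illustrative project ID and token name:\n\n```\ncurl -sX POST \"https://gitlab.com/api/v4/projects/\u003Cproject_id>/deploy_tokens\" \\\n     --header \"PRIVATE-TOKEN: \u003Cpersonal-access-token>\" \\\n     --data \"name=flux-read-token\" \\\n     --data \"scopes[]=read_repository\"\n```\n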
In the Flux CLI, create a secret with your deploy token and point the secret to the new repository. For example:\n\n```\nflux create secret git flux-deploy-authentication \\\n         --url=https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests \\\n         --namespace=default \\\n         --username=$GITLAB_DEPLOY_TOKEN_USER \\\n         --password=$GITLAB_DEPLOY_TOKEN_PASS\n```\nExpected output:\n```\n► git secret 'flux-deploy-authentication' created in 'default' namespace\n```\n\nTo check if your secret was generated successfully, run:\n\n```\noc -n default get secrets flux-deploy-authentication -o yaml\n```\nExpected output:\n```\napiVersion: v1\ndata:\n  password: Base64EncodedPassword=\n  username: Base64EncodedUsername\nkind: Secret\nmetadata:\n  creationTimestamp: \"2023-04-20T18:22:33Z\"\n  name: flux-deploy-authentication\n  namespace: default\n  resourceVersion: \"8168670\"\n  uid: 16292254-83cd-4df2-8a9c-bc4c718e4b4a\ntype: Opaque\n```\n\nUnder data, you should see base64-encoded values associated with your token username and password.\n\nCongratulations! You now have a manifest repository, a deploy token, and a secret generated directly on your cluster.\n\n### Configure Flux to sync your manifests\nNext, tell Flux to sync with the web-app-manifests repository.\n\nTo do so, create a [GitRepository resource](https://fluxcd.io/flux/components/source/gitrepositories/) in OpenShift:\n\n1. Clone the flux repo to your machine.\n```\n# Remember that we already have the flux repo cloned into our home dir.\ncd ~/flux\ngit pull\n```\n\n2. In your local clone of flux, add the GitRepository file `clusters/my-cluster/web-app-manifests-source.yaml`:\n  \n```\n",[726,535],{"slug":1569,"featured":6,"template":678},"set-up-flux-for-gitops-on-openshift","content:en-us:blog:set-up-flux-for-gitops-on-openshift.yml","Set Up Flux For Gitops On Openshift","en-us/blog/set-up-flux-for-gitops-on-openshift.yml","en-us/blog/set-up-flux-for-gitops-on-openshift",{"_path":1575,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1576,"content":1582,"config":1588,"_id":1590,"_type":16,"title":1591,"_source":17,"_file":1592,"_stem":1593,"_extension":20},"/en-us/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines",{"title":1577,"description":1578,"ogTitle":1577,"ogDescription":1578,"noIndex":6,"ogImage":1579,"ogUrl":1580,"ogSiteName":692,"ogType":693,"canonicalUrls":1580,"schema":1581},"DevSecOps workflows with conditional CI/CD pipeline rules","CI/CD pipelines can be simple or complex, what makes them efficient are CI rules that define when and how they run.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create efficient DevSecOps workflows with rules for conditional CI/CD pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2023-06-27\",\n      }",{"title":1583,"description":1578,"authors":1584,"heroImage":1579,"date":1585,"body":1586,"category":14,"tags":1587},"How to create efficient DevSecOps workflows with rules for conditional CI/CD pipelines",[1161],"2023-06-27","\nCI/CD pipelines can be simple or complex – what makes them efficient are rules that define when and how they run. 
By using rules, you create smarter CI/CD pipelines, which increase teams' productivity and allow organizations to iterate faster. In this tutorial, you will learn about the different types of CI/CD pipelines and rules and their use cases.\n\n## What is a pipeline?\nA pipeline is a top-level component of [continuous integration](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-integration) and [continuous delivery](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-delivery)/[continuous deployment](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-deployment), and it comprises [jobs](https://docs.gitlab.com/ee/ci/jobs/index.html), which are lists of tasks to be executed. Jobs are organized in [stages](https://docs.gitlab.com/ee/ci/yaml/index.html#stages), which define when the jobs run.\n\nA pipeline can be a [basic one](https://docs.gitlab.com/ee/ci/pipelines/pipeline_architectures.html#basic-pipelines) in which jobs run concurrently in each stage. Pipelines can also be complex, like [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#parent-child-pipelines), [merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html), [multi-project pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#multi-project-pipelines), or the more advanced [Directed Acyclic Graph pipelines](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/index.html) (DAG).\n\n![Complex pipeline showing dependencies](https://about.gitlab.com/images/blogimages/2023-06-15-efficient-devsecops-workflows-with-rules-for-conditional-pipelines/complex-pipelines.png)\n\nA [gitlab-runner pipeline](https://gitlab.com/gitlab-org/gitlab-runner/-/pipelines/798871212/) showing job dependencies.\n{: .note.text-center}\n\n![Directed Acyclic Graph](https://about.gitlab.com/images/blogimages/2023-06-15-efficient-devsecops-workflows-with-rules-for-conditional-pipelines/dag-pipelines.png)\n\nDirected Acyclic Graph pipeline\n{: .note.text-center}\n\nUse cases determine how complicated a pipeline can get. A use case might require testing an application and packaging it into a container; the pipeline can even deploy the container to an orchestrator like Kubernetes or to a container registry. Another use case might involve building applications that target different platforms with varying dependencies, which is where DAG pipelines shine.\n\n## What are CI/CD rules?\nCI/CD rules are the key to managing the flow of jobs in a pipeline. One of the powerful features of GitLab CI/CD is the ability to control when a CI/CD job runs, which can depend on context, changes made, [workflow](https://docs.gitlab.com/ee/ci/yaml/workflow.html) rules, values of CI/CD variables, or custom conditions. Aside from using `rules`, you can also control the flow of CI/CD pipelines using:\n\n* [`needs`](https://docs.gitlab.com/ee/ci/yaml/index.html#needs): establishes relationships between jobs and is used in DAG pipelines\n* [`only`](https://docs.gitlab.com/ee/ci/yaml/index.html#only--except): defines when a job should run\n* [`except`](https://docs.gitlab.com/ee/ci/yaml/index.html#only--except): defines when a job should not run\n* [`workflow`](https://docs.gitlab.com/ee/ci/yaml/workflow.html): controls when pipelines are created\n\n`only` and `except` should not be used with `rules` as this can lead to unexpected behavior; migrating legacy jobs to `rules` is usually straightforward, as the sketch below shows. 
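\nA minimal migration sketch (the job names and branch are illustrative; both jobs are intended to behave the same for branch pipelines):\n\n```yaml\n# Legacy syntax with 'only'\ntest-legacy:\n  script: echo \"Running tests\"\n  only:\n    - main\n\n# Equivalent 'rules' syntax\ntest-rules:\n  script: echo \"Running tests\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\"\n```\n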
Using `rules` is the recommended approach; the following sections explain the feature in detail.\n\n## What is the `rules` feature?\n`rules` determine when and if a job runs in a pipeline. If you have multiple rules defined, they are all evaluated in order until a matching rule is found and the job is executed according to the specified configuration.\n\n[Rules](https://docs.gitlab.com/ee/ci/yaml/#rules) can be defined using the keywords: `if`, `changes`, `exists`, `allow_failure`, `variables`, `when`, and `needs`.\n\n### `rules:if`\nThe `if` keyword evaluates if a job should be added to a pipeline. The evaluation is done based on the values of [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/index.html) defined in the scope of the job or pipeline and [predefined CI/CD variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html).\n\n```yaml\njob:\n  script:\n    - echo $(date)\n  rules:\n    - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME == $CI_DEFAULT_BRANCH\n```\n\nIn the CI/CD script above, the job prints the current date and time with the `echo` command. The job is only executed if the source branch of a merge request (`CI_MERGE_REQUEST_SOURCE_BRANCH_NAME`) is the same as the project's default branch (`CI_DEFAULT_BRANCH`) in a [merge request pipeline](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html). You can use the `==` and `!=` operators for comparison, while `=~` and `!~` allow you to compare a variable to a regular expression. You can combine multiple expressions using the `&&` (AND), `||` (OR) operators, and parentheses for grouping expressions.\n\n### `rules:changes`\nWith the `changes` keyword, you can watch for changes to certain files or folders for a job to execute. GitLab uses the output of [`git diff`](https://git-scm.com/docs/git-diff) to determine which files have changed:\n\n```yaml\njob:\n  script:\n    - terraform plan\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        - terraform/**/*.tf\n```\n\nIn this example, the `terraform plan` command is only executed when files with the `.tf` extension are changed in the `terraform` folder and its subdirectories. An additional rule ensures the job is executed for [merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\n\nThe `changes` rule can look for changes in specific files with `paths`:\n\n```yaml\njob:\n  script:\n    - terraform plan\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        paths:\n          - terraform/main.tf\n```\n\nChanges to files in a source reference (branch, tag, commit) can also be compared against other references in the Git repository. The CI/CD job will only execute when the source reference differs from the [specified reference value defined in `rules:changes:compare_to`](https://docs.gitlab.com/ee/ci/yaml/#ruleschangescompare_to). This value can be a Git commit SHA, tag, or branch name. The following example compares the source reference to the current `production` branch (`refs/heads/production`).\n\n```yaml\njob:\n  script:\n    - terraform plan\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        paths:\n          - terraform/main.tf\n        compare_to: 'refs/heads/production'\n```\n\n### `rules:exists`\nLike `changes`, you can execute CI/CD jobs only when specific files exist [using `rules:exists` rules](https://docs.gitlab.com/ee/ci/yaml/#rulesexists). For example, you can run a job that checks whether a `Gemfile.lock` file exists. 
The following example audits a Ruby project for vulnerable versions of gems or insecure gem sources using the [bundler-audit project](https://github.com/rubysec/bundler-audit).\n\n```yaml\njob:\n  script:\n    - bundle-audit check --format json --output bundle-audit.json\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      exists:\n        - Gemfile.lock\n```\n\n### `rules:allow_failure`\nThere are scenarios where the failure of a job should not affect the following jobs and stages of the pipeline. This can be useful in use cases where non-blocking tasks are required as part of a project but don't impact the project in any way. The [`rules:allow_failure` rule](https://docs.gitlab.com/ee/ci/yaml/#rulesallow_failure) can be set to `true` or `false`. It defaults to `false` implicitly when the rule is not specified.\n\n```yaml\njob:\n  script:\n    - bundle-audit check --format json --output bundle-audit.json\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\" && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED == \"false\"\n      exists:\n        - Gemfile.lock\n      allow_failure: true\n```\n\nIn this example, the job is allowed to fail only if a merge request event triggers the pipeline and the target branch is not protected.\n\n### `rules:needs`\nDisabled by default, [`rules:needs`](https://docs.gitlab.com/ee/ci/yaml/#rulesneeds) was introduced in [GitLab 16](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/) and can be enabled with the `introduce_rules_with_needs` [feature flag](https://docs.gitlab.com/ee/user/feature_flags.html). [`needs`](https://docs.gitlab.com/ee/ci/yaml/index.html#needs) is used to execute jobs out of order without waiting for other jobs in a stage to complete. When used with `rules`, it replaces the job's `needs` specification when the set conditions are met.\n\n```yaml\nstages:\n  - build\n  - qa\n  - deploy\n\nbuild-dev:\n  stage: build\n  rules:\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n  script: echo \"Building dev version...\"\n\nbuild-prod:\n  stage: build\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n  script: echo \"Building production version...\"\n\nqa-checks:\n  stage: qa\n  script:\n    - echo \"Running QA checks before publishing to Production....\"\n\ndeploy:\n  stage: deploy\n  needs: ['build-dev']\n  rules:\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      needs: ['build-prod', 'qa-checks']\n    - when: on_success # Run the job in other cases\n  script: echo \"Deploying application.\"\n\n```\n\nIn the example above, the deploy job has the `build-dev` job as a dependency before it runs; however, when the commit branch is the project's default branch, its dependency changes to `build-prod` and `qa-checks`. This can allow for extra checks to be implemented based on context.\n
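\nOne more keyword from the list above, `when`, is frequently combined with `rules`, for example, to make a job manual on the default branch and skip it everywhere else. A minimal sketch with an illustrative job:\n\n```yaml\ndeploy-manual:\n  script:\n    - echo \"Deploying...\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n      when: manual\n    - when: never\n```\n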
### `rules:variables`\nIn some situations, you only need certain variables in specific conditions, or their values change based on context; you can use the [`rules:variables`](https://docs.gitlab.com/ee/ci/yaml/#rulesvariables) rule to define variables when specific conditions are met. This also allows you to create more dynamic CI/CD execution workflows.\n\n```yaml\njob:\n  variables:\n    DEPLOY_VERSION: \"dev\"\n  rules:\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      variables:\n        DEPLOY_VERSION: \"stable\"\n  script:\n    - echo \"Deploying $DEPLOY_VERSION version\"\n```\n\n### `workflow:rules`\nSo far, we have looked at controlling when jobs run in a pipeline using the `rules` keyword. Sometimes, you want to control how the entire pipeline behaves: That's where [`workflow:rules` provide a powerful option](https://docs.gitlab.com/ee/ci/yaml/#workflowrules). `workflow:rules` are evaluated before jobs and take precedence over the job rules. For example, if a job has rules that allow it to run against a specific branch, but the workflow rules set jobs running against the branch to `when: never`, the jobs will not run.\n\nAll the features of `rules` mentioned in the previous sections work for `workflow:rules`.\n\n```yaml\nworkflow:\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"schedule\"\n      when: never\n    - if: $CI_PIPELINE_SOURCE == \"push\"\n      when: never\n    - when: always\n```\n\nIn the example above, the CI/CD pipeline runs except when a schedule or push event is triggered.\n\n## Use cases for CI/CD rules\nIn the previous section, we looked at different ways of using the `rules` feature of GitLab CI/CD. In this section, we will explore practical use cases.\n\n### Developer experience\nOne of the benefits of a DevSecOps platform is to allow developers to focus on what they do best: writing their code, with as little operations work as possible. A company's DevOps or Platform team can create CI/CD templates for different stages of their development lifecycle and use rules to add CI/CD jobs to handle specific tasks based on their technology stack. A developer only needs to include a default CI/CD script and pipelines are automatically created based on files detected, refs used, or defined variables, leading to increased productivity.\n\n### Security and quality assurance\nA major function of CI/CD pipelines is to catch bugs or vulnerabilities before they are deployed into production infrastructure. Using CI/CD rules, security and quality assurance teams can dynamically run extra checks when certain conditions are met. For example, malware scans can be added when new file extensions not in an approved list are detected, or more advanced performance tests are automatically added when a certain level of change has been introduced to the codebase. With GitLab's built-in security, including security in your pipelines can be done with just a few lines of code.\n\n```yaml\ninclude:\n  # Static\n  - template: Jobs/Container-Scanning.gitlab-ci.yml\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n  - template: Jobs/SAST-IaC.gitlab-ci.yml\n  - template: Jobs/Code-Quality.gitlab-ci.yml\n  - template: Security/Coverage-Fuzzing.gitlab-ci.yml\n  # Dynamic\n  - template: Security/DAST.latest.gitlab-ci.yml\n  - template: Security/BAS.latest.gitlab-ci.yml\n  - template: Security/DAST-API.latest.gitlab-ci.yml\n  - template: API-Fuzzing.latest.gitlab-ci.yml\n```\n\n### Automation\nThe power of CI/CD rules shines through in the (nearly) limitless possibilities of automating your CI/CD pipelines. 
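\nFor instance, a pipeline can add a container build job only when the project actually contains a Dockerfile, with no other configuration required. A minimal sketch (the job and image names are illustrative):\n\n```yaml\ndocker-build:\n  script:\n    - docker build -t my-image .\n  rules:\n    - exists:\n        - Dockerfile\n```\n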
GitLab [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/) takes this idea much further. It uses an opinionated best-practice collection of [GitLab CI/CD templates](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates) and rules to detect the technology stack used. AutoDevOps creates relevant jobs that take your application all the way to production from a push. You can review the [AutoDevOps template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml) to learn how it leverages CI/CD rules for greater efficiency.\n\n### Using CI/CD components\nBest practices emerge over several iterations of work. While building CI/CD pipelines, your DevOps team has likely built several CI/CD scripts that they repurpose across pipelines using the [`include`](https://docs.gitlab.com/ee/ci/yaml/#include) keyword. In [GitLab 16](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/), GitLab [introduced CI/CD Components](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#cicd-components), an experimental feature that allows your team to create reusable CI/CD components and publish them as a catalog that can be used to build smarter CI/CD pipelines rapidly. You can learn more [about using CI/CD components](https://docs.gitlab.com/ee/ci/components/) and the [component catalog direction](https://about.gitlab.com/direction/verify/component_catalog/).\n\nGitLab CI/CD enables you to run smarter pipelines, and it does so together with [GitLab Duo, AI-powered workflows](/gitlab-duo/) to help you build more secure software, faster.\n",[726,832,937,725,480],{"slug":1589,"featured":6,"template":678},"efficient-devsecops-workflows-with-rules-for-conditional-pipelines","content:en-us:blog:efficient-devsecops-workflows-with-rules-for-conditional-pipelines.yml","Efficient Devsecops Workflows With Rules For Conditional Pipelines","en-us/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines.yml","en-us/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines",{"_path":1595,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1596,"content":1602,"config":1607,"_id":1609,"_type":16,"title":1610,"_source":17,"_file":1611,"_stem":1612,"_extension":20},"/en-us/blog/quick-start-guide-for-gitlab-workspaces",{"title":1597,"description":1598,"ogTitle":1597,"ogDescription":1598,"noIndex":6,"ogImage":1599,"ogUrl":1600,"ogSiteName":692,"ogType":693,"canonicalUrls":1600,"schema":1601},"Quickstart guide for GitLab Remote Development workspaces","Learn how to create a workspace from your GitLab account and work directly from the remote development environment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664219/Blog/Hero%20Images/2023-06-22-quickstart-workspaces-cover-image2.png","https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quickstart guide for GitLab Remote Development workspaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-06-26\",\n      }",{"title":1597,"description":1598,"authors":1603,"heroImage":1599,"date":1604,"body":1605,"category":14,"tags":1606},[1019],"2023-06-26","\nGitLab 16.0 introduced [Remote Development workspaces (beta)](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#remote-development-workspaces-available-in-beta-for-public-projects), an exciting addition to the GitLab platform that empowers 
teams to build and deliver software more efficiently.\n\nThis guide provides step-by-step instructions on how to create a workspace from your GitLab account and work directly in the remote development environment. You will work in the Web IDE, a browser-based version of Visual Studio Code, seamlessly integrated into the workspace. \n\nFrom this quick start, you will learn how to create a workspace, use the Web IDE Terminal to install dependencies or start your server, and view your running application. \n\nTo learn more about Remote Development in GitLab, we recommend reading this informative blog post, \"[A first look at workspaces](https://about.gitlab.com/blog/introducing-workspaces-beta/),\" and the [workspaces docs](https://docs.gitlab.com/ee/user/workspace/).\n\nHere are the steps covered in this tutorial:\n\n- [Prerequisites](#prerequisites)\n- [Locate DevFile at the root of repository](#locate-devfile-at-the-root-of-repository)\n- [Create your workspace](#create-your-workspace)\n- [Install dependencies and previewing your application in the workspace](#install-dependencies-and-previewing-your-application-in-the-workspace)\n- [Make changes to the application and previewing the updated version](#make-changes-to-the-application-and-previewing-the-updated-version)\n- [Commit the change](#commit-the-change)\n- [Explore the demo](#explore-the-demo)\n- [Try out workspaces](#try-out-workspaces)\n\n## Prerequisites \nPrior to enabling developers to create workspaces, there are a few prerequisites, such as bringing your own Kubernetes cluster and installing and configuring the GitLab agent for Kubernetes on it. Additionally, certain configuration steps must be completed on the cluster. You can find detailed instructions for all these steps in [our workspaces prerequisites documentation](https://docs.gitlab.com/ee/user/workspace/configuration.html#prerequisites). Once the prerequisites are properly configured, developers who hold a Developer role or above within the root group will gain the ability to create workspaces.\n\n## Locate DevFile at the root of repository\nA [devfile](https://devfile.io/docs/2.2.0/devfile-ecosystem) is a declarative configuration file, in YAML syntax, used to define and describe the development environment for a software project. It provides a standardized way to specify the necessary tools, languages, runtimes, and other components required for developing an application.\n\nTo initiate a workspace, it is necessary to have a devfile located at the root of the repository. In this blog post, we will utilize a project that contains a devfile, accessible [here](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app/-/raw/main/.devfile.yaml). \n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: tooling-container\n    attributes:\n      gl/inject-editor: true\n    container:\n      # NOTE: THIS IMAGE EXISTS ONLY FOR DEMO PURPOSES AND WILL NOT BE MAINTAINED\n      image: registry.gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs/debian-bullseye-ruby-3.2-node-18.12:rubygems-3.4-git-2.33-lfs-2.9-yarn-1.22-graphicsmagick-1.3.36-gitlab-workspaces\n      memoryRequest: 1024M\n      memoryLimit: 2048M\n      cpuRequest: 500m\n      cpuLimit: 1000m\n      endpoints:\n      - name: http-3000\n        targetPort: 3000\n```\nFor more information, see the [GitLab documentation](https://docs.gitlab.com/ee/user/workspace/#devfile) and [devfile documentation](https://devfile.io/docs/2.2.0/devfile-schema).\n\n## Create your workspace \n1. 
Make sure you have a [Developer role or above](https://docs.gitlab.com/ee/user/permissions.html) in the root group, and the above prerequisites configured properly.\n2. Fork [this project](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app) to the GitLab group for which you have a Developer role or above. \n3. Switch contexts and select `Your work`.\n![Your work](https://about.gitlab.com/images/blogimages/2023-07-10-your-work.png){: .shadow}\n4. Select `Workspaces`.\n5. Select `New workspace`.\n6. Select the project you forked or another project that has a `.devfile.yaml` file at the root of the repository. \n7. Select the [cluster agent](https://docs.gitlab.com/ee/user/workspace/#prerequisites) owned by the group the project belongs to.\n8. In `Time before automatic termination`, enter the number of hours until the workspace automatically terminates. This timeout is a safety measure to prevent a workspace from consuming excessive resources or running indefinitely. \n9. Select `Create workspace`. \n\n![create ws](https://about.gitlab.com/images/blogimages/create_workspace.png){: .shadow}\n\nThe workspace will be deployed to the cluster and might take a few minutes to start. To access the workspace, under Preview, select the workspace link.\n\n![ws list](https://about.gitlab.com/images/blogimages/workspaces_list.png){: .shadow}\n\n## Install dependencies and preview your application in the workspace\nAfter you create your workspace, the [Web IDE using VS Code](https://docs.gitlab.com/ee/user/workspace/#web-ide) is injected into it, and the repository is cloned to the image. Consequently, you gain immediate access to your code and can start working on it right away.\n\nYou can now open the terminal, install any missing dependencies, and start the application.\n\n![Terminal](https://about.gitlab.com/images/blogimages/ws-terminal.png){: .shadow}\n\n1. To open the terminal, from the left menu, select `Terminal`, then `New Terminal`. \n2. Type `npm install` to install the dependencies listed in the [package.json](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app/-/blob/main/package.json) file.\n3. Type `npm start` to start the application.\n\nThe log will indicate that the application has started on port 3000.\n\n![log](https://about.gitlab.com/images/blogimages/server_log.png){: .shadow}\n\nYou can now access your application by opening the browser and using the workspace URL. Change the prefix of the URL to the port number on which your application is listening. For example, if your workspace URL is `https://\u003Cprefix>-workspace-73241-25728545-rqvpjm.workspaces.gitlab.dev` and your application is running on port 3000, replace `\u003Cprefix>` with 3000 to access your application.\n\n## Make changes to the application and preview the updated version\nIn the Web IDE, navigate to the `server.js` file and modify the text on line 9. \n\nAfterward, refresh the browser where your application is open to see the applied changes. \n\n## Commit the change \n1. In the Web IDE, click the merge icon in the activity bar.\n2. Click the line with `server.js` to view your change side by side.\n3. To stage your change, click the plus icon next to `server.js`.\n4. Type a commit message describing your change.\n5. Click Commit. \n6. 
Click Sync changes to push the commit to the GitLab server.\n\n  ![commit](https://about.gitlab.com/images/blogimages/commit-stage.png){: .shadow}\n\n## Explore the demo \nExplore further with this [click-through demo of workspaces](https://go.gitlab.com/qtu66q).\n\n## Try out workspaces\nRemote Development workspaces offer a convenient and efficient way to work on projects without the need for a local development setup. They provide a streamlined workflow and enable developers to focus on writing code rather than wrangling complex environments.\n\nBy adopting workspaces, developers can collaborate effectively, improve productivity, and simplify the development process. \n\nGive workspaces a try and revolutionize your remote development experience today!\n\nCover image by \u003Ca href=\"https://unsplash.com/@pankajpatel?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Pankaj Patel\u003C/a> on \u003Ca href=\"https://unsplash.com/photos/_SgRNwAVNKw?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n{: .note}\n",[726,894,110],{"slug":1608,"featured":6,"template":678},"quick-start-guide-for-gitlab-workspaces","content:en-us:blog:quick-start-guide-for-gitlab-workspaces.yml","Quick Start Guide For Gitlab Workspaces","en-us/blog/quick-start-guide-for-gitlab-workspaces.yml","en-us/blog/quick-start-guide-for-gitlab-workspaces",{"_path":1614,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1615,"content":1621,"config":1627,"_id":1629,"_type":16,"title":1630,"_source":17,"_file":1631,"_stem":1632,"_extension":20},"/en-us/blog/android-cicd-with-gitlab",{"title":1616,"description":1617,"ogTitle":1616,"ogDescription":1617,"noIndex":6,"ogImage":1618,"ogUrl":1619,"ogSiteName":692,"ogType":693,"canonicalUrls":1619,"schema":1620},"Tutorial: Android CI/CD with GitLab","Learn how to create an automated Android CI/CD pipeline using GitLab and fastlane.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669316/Blog/Hero%20Images/angela-compagnone-4Iyg6cNU7sI-unsplash.jpg","https://about.gitlab.com/blog/android-cicd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Android CI/CD with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2023-06-13\",\n      }",{"title":1616,"description":1617,"authors":1622,"heroImage":1618,"date":1624,"body":1625,"category":14,"tags":1626},[1623],"Darby Frey","2023-06-13","\n\nMention the word keystore and all Android developers in a 5km radius will suddenly have a small feeling of panic. Attempting to automate a [CI/CD](https://docs.gitlab.com/ee/ci/) pipeline to deploy an app can be frustrating, and configuring Google Play access and code signing is at the heart of the problem.\n\nBut fear not! 
GitLab Mobile DevOps is here to make this process easier and faster, and I am here to guide you.\n\n[GitLab Mobile DevOps](https://docs.gitlab.com/ee/ci/mobile_devops.html) is a collection of features built right into GitLab to solve the biggest challenges mobile teams face in establishing a DevOps practice.\n\nIn this blog post, I’ll demonstrate how to set up an automated CI/CD pipeline using GitLab and [fastlane](https://fastlane.tools/).\n\n## Prerequisites \nTo get started, there are a few prerequisites you’ll need:\n\n* A Google Play developer account - [https://play.google.com/console](https://play.google.com/console)\n* Ruby and Android Studio installed on your local machine - [https://docs.fastlane.tools/getting-started/android/setup/](https://docs.fastlane.tools/getting-started/android/setup/)\n\n> Try your hand at the [iOS CI/CD for GitLab tutorial](https://about.gitlab.com/blog/ios-cicd-with-gitlab/)\n\n## Reference project\nFor this tutorial, we’ll use the Android demo project for reference: [https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo).\n\n## Install fastlane\nIf you haven’t done so yet, the first step will be to install fastlane. Do this by creating a file in the root of your project called `Gemfile`. Give it the following contents:\n\n```ruby\nsource \"https://rubygems.org\"\n\ngem \"fastlane\"\n```\n\nThen, from the terminal in your project, run:\n\n```\nbundle install\n```\n\nThis command will install fastlane and all of its related dependencies.\n\n## Initialize fastlane\nNow that fastlane is installed, we can set it up for our project. Run the following command from the terminal in your project. You’ll be asked to enter your package name, so enter that. When prompted for the JSON secret file, you can skip it for now, and you can answer \"no\" to the questions about metadata management.\n\n```\nbundle exec fastlane init\n```\n\n![Initialize fastlane](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/fastlane-init.png)\n\nRunning this command will create a new folder in your project called `fastlane`. This folder will contain two files: `Appfile` and `Fastfile`.\n\nThe Appfile contains the configuration information for the app, and the Fastfile has some sample code that we will replace later. See the fastlane docs for more information about the configuration details in the Appfile: [https://docs.fastlane.tools/advanced/Appfile/](https://docs.fastlane.tools/advanced/Appfile/).\n\n## Code signing\nNext are the steps for code signing.\n\n### Create a keystore\nFirst, create a keystore and a properties file for code signing. Run the following command to generate a keystore in the project root called `release-keystore.jks`:\n\n```\nkeytool -genkey -v -keystore release-keystore.jks -storepass password -alias release -keypass password -keyalg RSA -keysize 2048 -validity 10000\n```\n\n![Create a keystore](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/keytool-genkey.png)\n\nMore information is available in the [keytool docs](https://download.java.net/java/early_access/loom/docs/specs/man/keytool.html).\n\nThe next step is to create a properties file to be used by [Gradle](https://gradle.org/). 
Create a file in the project root called `release-keystore.properties`, with the following contents:\n\n```\nstoreFile=../release-keystore.jks\nkeyAlias=release\nkeyPassword=password\nstorePassword=password\n```\n\nAlso, be sure to add both files to your `.gitignore` file so they aren't committed to version control.\n\n### Configure Gradle\nNext, configure Gradle to use the newly created keystore. In the `app/build.gradle` file, add the following:\n\n**1.** Right after the plugins section, add:\n\n```\ndef keystoreProperties = new Properties()\ndef keystorePropertiesFile = rootProject.file('release-keystore.properties')\nif (keystorePropertiesFile.exists()) {\n    keystoreProperties.load(new FileInputStream(keystorePropertiesFile))\n}\n```\n\n**2.** Before the `buildTypes` section, add:\n\n```\nsigningConfigs {\n    release {\n        keyAlias keystoreProperties['keyAlias']\n        keyPassword keystoreProperties['keyPassword']\n        storeFile keystoreProperties['storeFile'] ? file(keystoreProperties['storeFile']) : null\n        storePassword keystoreProperties['storePassword']\n    }\n}\n```\n\n**3.** Lastly, add the signingConfig to the release build type:\n\n```\nsigningConfig signingConfigs.release\n```\n\n## Upload keystore to GitLab secure files\nNext, upload your keystore files to GitLab so they can be used in CI/CD jobs. \n\n1. On the top bar, select **Menu > Projects** and find your project.\n1. On the left sidebar, select **Settings > CI/CD**.\n1. In the Secure Files section, select **Expand**.\n1. Select **Upload File**.\n1. Find the file to upload, select **Open**, and the file upload begins immediately. The file shows up in the list when the upload is complete.\n\nDo this for both the `release-keystore.jks` file and the `release-keystore.properties` file.\n\n![Upload Secure File](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/upload-secure-file.png)\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/list-secure-files.png)\n\n## Create a CI/CD pipeline\n\nWith the configuration in place, copy the contents of the `.gitlab-ci.yml` and `fastlane/Fastfile` files below into your project.\n\nThis [.gitlab-ci.yml](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo/-/blob/main/.gitlab-ci.yml) has all the configuration needed to run the test, build, and beta jobs.\nThe [fastlane/Fastfile](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo/-/blob/main/fastlane/Fastfile) is an example that can be customized to your project's settings.\n\nNote: This fastlane configuration uses plugins. See the [docs](https://docs.fastlane.tools/plugins/using-plugins/) for instructions on how to configure your project for fastlane plugins.
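\n\nAs a rough sketch of the pipeline shape (not the demo project's actual file), the three jobs map onto fastlane lanes like this; the image name is a placeholder for whatever Android SDK build image you use:\n\n```yaml\nstages: [test, build, beta]\n\ndefault:\n  image: my-android-sdk-image:latest   # placeholder: substitute a real Android build image\n  before_script:\n    - bundle install\n\ntest:\n  stage: test\n  script: bundle exec fastlane test\n\nbuild:\n  stage: build\n  script: bundle exec fastlane build\n\nbeta:\n  stage: beta\n  script: bundle exec fastlane beta\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\"\n      when: manual   # the beta push to Google Play stays a human decision\n```\n\n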
## Create an app in the Google Play Console\nNext, generate a build of your app locally and upload it to seed a new app entry in the Google Play Console. Run the following command locally:\n\n```\nbundle exec fastlane build\n```\n\nThis command will create a signed build of the app at\n\n```\nbuild/outputs/bundle/release/app-release.aab\n```\n\nWith the signed build ready to go, log in to the [Google Play Console](https://play.google.com/console), create a new app, and seed it with the initial build.\n\n## Configure Google Play integration\nThe last thing to set up is the Google Play integration in GitLab. To do so, first, create a Google service account.\n\n### Create a Google service account\nFollow the [instructions](https://docs.fastlane.tools/actions/supply/#setup) for setting up a service account in Google Cloud Platform and granting that account access to the project in Google Play.\n\n### Enable Google Play integration\nFollow the [instructions](https://docs.gitlab.com/ee/user/project/integrations/google_play.html) for configuring the Google Play integration by providing a package name and the JSON key file just generated for the service account.\n\nThis is a simplified CI/CD configuration that creates three CI/CD jobs to run each of the lanes in fastlane on the GitLab Runners. The test and build jobs will run for all CI/CD pipelines, and the beta job will run only on CI/CD pipelines on the main branch. The beta job is manually triggered, so you can control when the beta release is pushed to Google Play. \n\nWith these configurations in place, commit all of these changes and push them up to your project. The CI/CD pipeline will kick off, and you can see these jobs in action.\n",[894,110,726],{"slug":1628,"featured":6,"template":678},"android-cicd-with-gitlab","content:en-us:blog:android-cicd-with-gitlab.yml","Android Cicd With Gitlab","en-us/blog/android-cicd-with-gitlab.yml","en-us/blog/android-cicd-with-gitlab",{"_path":1634,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1635,"content":1641,"config":1647,"_id":1649,"_type":16,"title":1650,"_source":17,"_file":1651,"_stem":1652,"_extension":20},"/en-us/blog/getting-started-with-value-streams-dashboard",{"title":1636,"description":1637,"ogTitle":1636,"ogDescription":1637,"noIndex":6,"ogImage":1638,"ogUrl":1639,"ogSiteName":692,"ogType":693,"canonicalUrls":1639,"schema":1640},"Getting started with the new GitLab Value Streams Dashboard","Benchmark your value stream lifecycle, DORA, and vulnerabilities metrics to gain valuable insights and uncover patterns for continuous improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671793/Blog/Hero%20Images/16_0-cover-image.png","https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with the new GitLab Value Streams Dashboard\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2023-06-12\",\n      }",{"title":1636,"description":1637,"authors":1642,"heroImage":1638,"date":1643,"body":1644,"category":14,"tags":1645},[745],"2023-06-12","\n\n\u003Ci>This is part two of our multipart series introducing you to the capabilities within GitLab Value Stream Management and the Value Streams Dashboard. In part one, [learn about the Total Time Chart](https://about.gitlab.com/blog/value-stream-total-time-chart/) and how to simplify top-down optimization flow with Value Stream Management.\u003C/i>\n\nGetting started with GitLab [Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html), a customizable dashboard that enables decision-makers to identify trends, patterns, and opportunities for digital transformation improvements, is easy. 
If you're already using GitLab Value Stream Management, simply navigate to your project's or group's Analytics tab, and within [Value stream analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#view-value-stream-analytics), click on the \"Value Streams Dashboard - DORA\" link. This will open a new page with the Value Streams Dashboard.\n\n![image of DORA Metrics console](https://about.gitlab.com/images/blogimages/vsdCover.png){: .shadow}\nDORA metrics comparison panel\n{: .note.text-center}\n\nGitLab Value Stream Management allows customers to visualize their end-to-end DevSecOps workstreams, manage their software development processes, and gain insight into how digital transformation and technological investments are delivering value and driving business results. GitLab Value Stream Management is able to do this because GitLab provides an entire DevOps platform as a single application and, therefore, holds all the data needed to provide end-to-end visibility throughout the entire software development lifecycle. So now your decisions rely on actual data rather than blind estimation or gut feelings. Additionally, because GitLab is the place where work happens, GitLab Value Stream Management insights are also actionable, allowing your users to move from \"understanding\" to \"fixing\" at any time, from within their workflow and without losing context.\n\nThe centralized UI in Value Streams Dashboard acts as the single source of truth (SSOT), where all stakeholders can access and view the same set of metrics that are relevant to the organization. The SSOT views ensure consistency, eliminate discrepancies, and provide a reliable and unified source of data for decision-making and analysis.\n\nThe first iteration of the GitLab Value Streams Dashboard was focused on enabling teams to continuously improve software delivery workflows by benchmarking [value stream lifecycle metrics, DORA metrics, and vulnerabilities metrics](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dashboard-metrics-and-drill-down-reports). One of the key features is a new DevSecOps metrics comparison panel that displays the metrics for a group or project in the month-to-date, last month, the month before, and the past 180 days.\n\nThis comparison enables managers to track team improvements in the context of the other DevSecOps metrics to find patterns or trends over time. The data is presented in a clear and concise manner, ensuring that you can quickly grasp the significance of the metrics.\n\n![The Value Streams Dashboard helps you get a high-level custom view over multiple DevOps metrics and understand whether they are improving month-over-month](https://about.gitlab.com/images/blogimages/2023-05-18_vsd_1.gif){: .shadow}\nValue Streams Dashboard metrics comparison panel\n{: .note.text-center}\n\nAdditionally, from each metric you can drill down to a detailed report to investigate the underlying data, understand what is affecting the team performance, and identify actionable insights.\n\nWe understand that every organization has its own set of subgroups and projects, each with specific processes and terminology. That's why we designed our dashboard to be flexible and adaptable. Users have the power to [customize](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#customize-the-dashboard-panels) their dashboard by including panels from different subgroups or projects. 
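\n\nAs a hedged sketch of what that customization looks like (the exact file location and schema may have evolved, so treat this as illustrative and check the customization docs linked above; the group and project paths are placeholders):\n\n```yaml\n# List the panels for the dashboard to render, one per group or project\npanels:\n  - data:\n      namespace: my-group            # hypothetical group path\n  - data:\n      namespace: my-group/my-project # hypothetical project path\n```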
\n\nTracking and comparing these metrics over a period of time helps teams catch downward trends early, drill down into individual projects and metrics, take remedial action to maintain their software delivery performance, and track the progress of their innovation investments. The Value Streams Dashboard's intuitive interface reduces the learning curve and eliminates the need for extensive training. Everyone can immediately leverage the power of the platform's unified data store, maximizing their productivity and saving precious time and resources.\n\n## Value Streams Dashboard roadmap\nWe are just getting started with delivering new capabilities in our Value Streams Dashboard. The roadmap includes planned features and functionality that will continue to improve decision-making and operational efficiencies.\n\nSome of the capabilities we plan to focus on next include:\n\n- adding an [executive-level summary](https://gitlab.com/groups/gitlab-org/-/epics/9558) of key metrics related to software performance and flow of value across the organization\n- adding a [\"DORA Performers score\"](https://gitlab.com/groups/gitlab-org/-/epics/10416) panel with the DORA metrics health from all the organization's groups and projects\n- adding a [filter by label to the comparison panel](https://gitlab.com/gitlab-org/gitlab/-/issues/388890) - we recognize that not every team follows the same flow, so we are adding the ability to slice and dice the dashboard views with GitLab labels as filters\n\nTo help us improve the Value Streams Dashboard, please share feedback about your experience in this [survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).\n\n## Learn more\n* Find out what's next on the [Value Stream Management direction page](https://about.gitlab.com/direction/plan/value_stream_management/#whats-next-and-why).\n\n* Learn how to use the new dashboard using the [Value Streams Dashboard documentation](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html).\n\n* Watch this short video on the Value Streams Dashboard:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/EA9Sbks27g4\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCheck out part three of this multipart series: \"[GitLab's 3 steps to optimizing software value streams](https://about.gitlab.com/blog/three-steps-to-optimize-software-value-streams/)\".\n\n\u003Ci>Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\u003C/i>\n",[726,749,704,727,1646],"agile",{"slug":1648,"featured":6,"template":678},"getting-started-with-value-streams-dashboard","content:en-us:blog:getting-started-with-value-streams-dashboard.yml","Getting Started With Value Streams Dashboard","en-us/blog/getting-started-with-value-streams-dashboard.yml","en-us/blog/getting-started-with-value-streams-dashboard",{"_path":1654,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1655,"content":1660,"config":1665,"_id":1667,"_type":16,"title":1668,"_source":17,"_file":1669,"_stem":1670,"_extension":20},"/en-us/blog/improving-accessibility-in-gitlab-navigation",{"title":1656,"description":1657,"ogTitle":1656,"ogDescription":1657,"noIndex":6,"ogImage":1539,"ogUrl":1658,"ogSiteName":692,"ogType":693,"canonicalUrls":1658,"schema":1659},"How we improved accessibility in GitLab’s new navigation","A new navigation redesign provides an excellent opportunity to build upon the existing experience and improve accessibility for everyone.","https://about.gitlab.com/blog/improving-accessibility-in-gitlab-navigation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we improved accessibility in GitLab’s new navigation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Scott de Jonge\"}],\n        \"datePublished\": \"2023-06-12\",\n      }",{"title":1656,"description":1657,"authors":1661,"heroImage":1539,"date":1643,"body":1663,"category":14,"tags":1664},[1662],"Scott de Jonge","\n\nA new navigation redesign provides an excellent opportunity to build upon the existing experience and improve accessibility. With navigation, it’s important to review accessibility at every opportunity, refine the experience with findings, and respect people’s existing relationship with GitLab’s navigation.\n\n![Navigation dashboard](https://about.gitlab.com/images/blogimages/2023-05-31-navigation-accessibility/navigation-accessibility-illustration@2x.png){: .shadow}\n\n\nHere are the steps we took, what we learned, and the impact on our navigation redesign.\n\n## Review accessibility early\nTo know what we needed to deliver, we had to understand what the [existing navigation experience was for people using assistive technologies (AT)](https://gitlab.com/gitlab-org/gitlab/-/issues/382850#note_1187135551) such as screen readers. An effective evaluation technique is mapping the navigation [accessibility tree](https://web.dev/the-accessibility-tree/). This provided detail about the order of elements, as well as the accessible roles and labels that AT communicates. Reducing major changes to the accessibility tree minimizes disruption to people’s memory of navigation items.\n\nEarly experiments with a [low-fidelity HTML prototype of the new navigation design](https://gitlab.com/gitlab-org/gitlab/-/issues/382850#note_1192150162) provided insight into how HTML elements with appropriate [ARIA attributes](https://www.w3.org/TR/wai-aria-1.2/) could achieve an intuitive and meaningful accessibility tree. 
This exercise allowed for a collaborative evaluation of the proposed designs, identifying tweaks to improve accessibility.\n\n## Review accessibility frequently\nEarly prototyping gives a top-level view of accessibility; it is helpful for determining the [order of tab stops](https://developer.chrome.com/docs/lighthouse/accessibility/logical-tab-order/) and selecting appropriate [landmark elements](https://developer.mozilla.org/en-US/blog/aria-accessibility-html-landmark-roles/). More complex interactions, such as interactive menus, are better suited to individual reviews.\n\nAccessibility reviews of interactive menu components ensure they deliver a desirable experience in isolation. When implemented within the navigation, these components can then be reviewed in context by themselves (keyboard interactivity and accessible labels), and in relationship with other menus (logical tab stops and consistent behaviors).\n\n## Review with popular accessibility technologies\nAutomated testing tools like [aXe DevTools](https://www.deque.com/axe/devtools/) are excellent at highlighting [Web Content Accessibility Guidelines (WCAG)](https://www.w3.org/WAI/standards-guidelines/wcag/) criteria violations directly on elements. This makes it simple to identify the required changes to markup and styles for semantic meaning, readability, and contrast.\n\nIt’s important to manually test web accessibility with [popular screen reader and browser combinations](https://webaim.org/projects/screenreadersurvey9/#browsercombos) to evaluate the [75% of criteria which automated tools miss](https://karlgroves.com/efficiency-in-accessibility-testing-or-why-usability-testing-should-be-last/). When manually testing with screen readers, we identify accessibility issues with the logical order of elements, keyboard interactivity, and the descriptive labels of elements and states; fixing these improves the experience for people using AT or alternative input modes, and is crucial to providing an inclusive and accessible experience for all users.\n\nWhile reviewing the new navigation, we manually tested with the following screen reader and browser combinations using local devices and [Assistiv Labs](https://assistivlabs.com/):\n\n* [JAWS](https://www.freedomscientific.com/products/software/jaws/) with Chrome on Windows\n* [NVDA](https://www.nvaccess.org/about-nvda/) with Firefox on Windows\n* [VoiceOver](https://support.apple.com/en-ca/guide/voiceover/welcome/mac) with Safari on macOS and iOS\n\nFindings from testing were logged into an [epic](https://gitlab.com/groups/gitlab-org/-/epics/9623) where they were prioritized, actioned, and tested again. \n\nHere are some key findings from our manual accessibility testing:\n1. Using [`aria-expanded`](https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/Attributes/aria-expanded) and [`aria-controls`](https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/Attributes/aria-controls) on `\u003Cbutton>` elements which toggle menu sections, to provide meaningful state descriptions to screen readers ([WCAG 2.1 3.2.3: Consistent Navigation](https://www.w3.org/WAI/WCAG21/Understanding/consistent-navigation.html), [WCAG 2.1 4.1.2: Name, Role, Value](https://www.w3.org/WAI/WCAG21/Understanding/name-role-value))\n2. 
Using [`aria-current=\"page\"`](https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/Attributes/aria-current) on the current page link `\u003Ca>` element to announce the current page when focused ([WCAG 2.1 3.2.3: Consistent Navigation](https://www.w3.org/WAI/WCAG21/Understanding/consistent-navigation.html), [WCAG 2.1 4.1.2 Name, Role, Value](https://www.w3.org/WAI/WCAG21/Understanding/name-role-value)).\n3. Including a \"Skip to main content\" link to allow keyboard and screen reader users to bypass repetitive content ([WCAG 2.1 3.2.3: Consistent Navigation](https://www.w3.org/WAI/WCAG21/Understanding/consistent-navigation.html), [WCAG 2.1 2.4.1: Bypass Blocks](https://www.w3.org/WAI/WCAG21/Understanding/bypass-blocks.html)).\n4. Excluding navigation contents from focus order when collapsed ([WCAG 2.1 2.4.3: Focus order](https://www.w3.org/WAI/WCAG21/Understanding/focus-order.html)).\n\n## Leverage browser behavior\nOne of the interesting implementation challenges of the new navigation was excluding interactive content when it’s collapsed.\n\nEarly iterations used the [`aria-hidden`](https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/Attributes/aria-hidden) attribute in combination with [`inert`](https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/inert) to ensure content was hidden for keyboard and screen readers. When testing with different browser and screen reader combinations, we discovered content was still interactive in browsers that did not support [`inert`](https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/inert). \n\nInstead of [`aria-hidden`](https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/Attributes/aria-hidden), we could use the [`visibility: hidden`](https://developer.mozilla.org/en-US/docs/Web/CSS/visibility) CSS property, which proved to work as expected. The catch was that it also affects the visibility (hence the name) for sighted users, so it would need to be applied only once the transition between the expanded and collapsed states is complete, adding to the complexity of the implementation. Things became even more complex given the different default collapse states across viewport sizes, as well as a mouse-hover peek behavior. In the end, this solution was not as maintainable and robust as hoped. Back to the drawing board.\n\nCollaborative efforts of browser vendors to implement web standards through [Interop](https://wpt.fyi/interop-2023) meant that the [`inert`](https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/inert) HTML attribute became fully supported as of [Firefox v112](https://developer.mozilla.org/en-US/docs/Mozilla/Firefox/Releases/112#html). The [`inert`](https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/inert) attribute allowed the navigation to be marked as non-interactive, which simplified the approach while providing a consistent experience in modern browsers (last two major versions) and screen readers.\n\nUsing HTML standards produces a consistent experience across devices and browsers that is more familiar to users, as well as improving the maintainability and robustness of the implementation.
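\n\nAs a simplified sketch (not GitLab's actual markup), the resulting pattern looks roughly like this, with `aria-expanded`/`aria-controls` describing the toggle's state and `inert` removing the collapsed panel from the focus order:\n\n```html\n\u003C!-- Toggle reports its state to AT; aria-expanded flips to \"true\" on open -->\n\u003Cbutton aria-expanded=\"false\" aria-controls=\"sidebar-nav\">Toggle navigation\u003C/button>\n\n\u003C!-- While collapsed, `inert` keeps links out of the tab order; remove it on open -->\n\u003Cnav id=\"sidebar-nav\" inert>\n  \u003Ca href=\"/dashboard\" aria-current=\"page\">Dashboard\u003C/a>\n  \u003Ca href=\"/projects\">Projects\u003C/a>\n\u003C/nav>\n```\n\n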
## Make iterative improvements\nWe’ve reviewed the navigation at key milestones to find and resolve accessibility issues, and refine the accessibility tree. Thanks to these efforts, we’ve iteratively improved upon the existing navigation accessibility, satisfying the relevant WCAG criteria and delivering an intuitive and familiar experience for AT.\n\n## What’s next\nAccessibility is never done. While we’ve carefully considered accessibility throughout the design and implementation stages, we need your feedback on how we can continue to iterate on the accessibility of the new navigation. Please add any feedback, questions, or ideas to the [navigation accessibility epic](https://gitlab.com/groups/gitlab-org/-/epics/9623).\n",[675],{"slug":1666,"featured":6,"template":678},"improving-accessibility-in-gitlab-navigation","content:en-us:blog:improving-accessibility-in-gitlab-navigation.yml","Improving Accessibility In Gitlab Navigation","en-us/blog/improving-accessibility-in-gitlab-navigation.yml","en-us/blog/improving-accessibility-in-gitlab-navigation",{"_path":1672,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1673,"content":1679,"config":1684,"_id":1686,"_type":16,"title":1687,"_source":17,"_file":1688,"_stem":1689,"_extension":20},"/en-us/blog/ios-cicd-with-gitlab",{"title":1674,"description":1675,"ogTitle":1674,"ogDescription":1675,"noIndex":6,"ogImage":1676,"ogUrl":1677,"ogSiteName":692,"ogType":693,"canonicalUrls":1677,"schema":1678},"Tutorial: iOS CI/CD with GitLab","Learn how to create an automated CI/CD pipeline using GitLab and fastlane.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669340/Blog/Hero%20Images/john-cameron-DgRb7aAGK4k-unsplash.jpg","https://about.gitlab.com/blog/ios-cicd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: iOS CI/CD with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2023-06-07\",\n      }",{"title":1674,"description":1675,"authors":1680,"heroImage":1676,"date":1681,"body":1682,"category":14,"tags":1683},[1623],"2023-06-07","\n\nCreating an automated [CI/CD](https://docs.gitlab.com/ee/ci/) pipeline for an Apple iOS application can be challenging. 
Configuring build environments and managing code signing can be very time-consuming and error-prone, and when you get all of that working, you still need a way to send your app to Apple.\n\nGitLab makes this much easier with [GitLab Mobile DevOps](https://docs.gitlab.com/ee/ci/mobile_devops.html).\n\nGitLab Mobile DevOps is a collection of features built right into GitLab to solve the biggest challenges mobile teams face in establishing a DevOps practice.\n\nIn this blog post, I’ll demonstrate how to set up an automated CI/CD pipeline using GitLab and [fastlane](https://fastlane.tools/).\n\n## Prerequisites\nTo get started, there are a few prerequisites you’ll need:\n\n* An Apple Developer account - [https://developer.apple.com/](https://developer.apple.com/)\n* Ruby and Xcode command line tools installed on your local machine - [https://docs.fastlane.tools/getting-started/ios/setup](https://docs.fastlane.tools/getting-started/ios/setup/) \n\n> Try out our [Android CI/CD with GitLab tutorial](/blog/android-cicd-with-gitlab/).\n\n## Reference project\nFor this walkthrough, we’ll use the iOS demo project for reference: [https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo)\n\n## Install fastlane\nIf you haven’t done so yet, the first step will be to install fastlane. Do this by creating a file in the root of your project called `Gemfile`. Give it the following contents:\n\n```ruby\nsource \"https://rubygems.org\"\n\ngem \"fastlane\"\n```\n\nThen, from the terminal in your project, run:\n\n```\nbundle install\n```\n\nThis command will install fastlane and all of its related dependencies.\n\n## Initialize fastlane\nNow that fastlane is installed, we can set it up for our project. Run the following command from the terminal in your project and choose option No. 2, since we will be targeting TestFlight in this tutorial:\n\n```\nbundle exec fastlane init\n```\n\nRunning this command will create a new folder in your project called `fastlane`. This folder will contain two files: `Appfile` and `Fastfile`. \n\n![Initialize Fastlane](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/fastlane-init.png)\n\nThe Appfile contains the configuration information for the app, and the Fastfile has some sample code that we will replace later. See the fastlane docs for more information about the configuration details in the Appfile: [https://docs.fastlane.tools/advanced/Appfile/](https://docs.fastlane.tools/advanced/Appfile/).\n\n## Initialize fastlane match\nThe next step will be to set up fastlane Match, which is the part of fastlane that handles code signing. For more information on fastlane Match, see the docs: [https://docs.fastlane.tools/actions/match/](https://docs.fastlane.tools/actions/match/).\n\nWe’ll start by running the following command from the terminal in your project:\n\n```\nbundle exec fastlane match init\n```\n\nThis command will prompt you to choose which storage backend you want to use (select `gitlab_secure_files`) and to input your project path (for example: `gitlab-org/gitlab`). It will then generate a fastlane Matchfile configured to use your project as the storage backend for fastlane Match.\n\n![Initialize fastlane Match](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/match-init.png)\n\n## Generate a project access token\nNext, you'll need a GitLab project access token to use fastlane Match from your local machine. 
To create a project access token, visit the Access Tokens section under Settings in your GitLab project. Create a new token with Maintainer access and the `api` scope.\n\nThen run the following command from the terminal in your project, replacing `YOUR_NEW_TOKEN` with the access token you just generated:\n\n```\nexport PRIVATE_TOKEN=YOUR_NEW_TOKEN\n```\n\nThis will configure fastlane to use this access token when making fastlane Match requests to your project.\n\n## Generate signing certificates\nNow that fastlane Match is configured, we can use it to generate the signing certificates and provisioning profiles for our app and upload them to GitLab.\n\nNOTE: If you already have these files for your app, see the instructions in [this blog post](/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/) on how to use fastlane to import your existing code signing files.\n\nRun the following command from the terminal in your project to generate development code signing files and upload them to GitLab:\n\n```\nbundle exec fastlane match development\n```\n\nWhen this command completes, go to the CI/CD settings page in your project and scroll down to the Secure Files section to see the files that were just generated and added to your project.\n\nWhile we’re here, we can go ahead and do the same thing for the appstore code signing files. Run the following command to generate the appstore code signing files and upload them to GitLab:\n\n```\nbundle exec fastlane match appstore\n```\n\n## Update Xcode configuration\nWith the code signing files ready to go, we have one small change to make in Xcode. In your project in Xcode, go to the Signing & Capabilities section and disable automatic code signing management. Then, select the appropriate provisioning profile and signing certificate from the list based on your build target. The certificates we just generated will show up in that list.\n\n![Configure Xcode Provisioning Profiles](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/xcode.png)\n\nWith all of our code signing configuration in place, we can now move on to setting up the integration with the Apple App Store.\n\n## Apple App Store integration\nThe final bit of configuration is the Apple App Store integration. To do this, we’ll need to create an API key in App Store Connect. See [the instructions](https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api) to create and download the key file to your local machine. This key should have the App Manager role.\n\nOnce the key is generated, go to **Settings > Integrations** in your project, and click on the integration for Apple App Store Connect. You’ll be asked to supply the issuer ID and key ID from App Store Connect, along with the key file you just downloaded. With all of that configuration in place, click the Test Settings button to ensure everything works. If it gives you an error, double-check your settings and try again. Once it’s working, click Save Changes to save and activate the integration.\n\nWith the integration activated, the following CI variables are added to all pipelines on protected branches and tags:\n\n* `APP_STORE_CONNECT_API_KEY_ISSUER_ID`\n* `APP_STORE_CONNECT_API_KEY_KEY_ID`\n* `APP_STORE_CONNECT_API_KEY_KEY`\n\nThese CI variables can be used by fastlane or any custom tooling to interact with the Apple App Store to upload builds, or perform other API-enabled tasks.
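\n\nFor instance, here is a hedged sketch of how a lane might turn those variables into an App Store Connect token using fastlane's `app_store_connect_api_key` action; the lane name is hypothetical, and whether the injected key is Base64-encoded is an assumption to confirm against the integration docs:\n\n```ruby\nlane :upload_beta do\n  # Build an API token from the variables the integration injects\n  api_key = app_store_connect_api_key(\n    key_id: ENV['APP_STORE_CONNECT_API_KEY_KEY_ID'],\n    issuer_id: ENV['APP_STORE_CONNECT_API_KEY_ISSUER_ID'],\n    key_content: ENV['APP_STORE_CONNECT_API_KEY_KEY'],\n    is_key_content_base64: true # assumption: key is stored Base64-encoded\n  )\n\n  upload_to_testflight(api_key: api_key)\nend\n```\n\n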
## Fastfile\nWith all of our configuration in place, we can now drop in a sample Fastfile to show how to perform the build, sign, and release actions.\n\nFrom the [sample project](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo), copy the contents of the `fastlane/Fastfile` and paste it into the Fastfile in your project, replacing the existing content. \n\n[https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/fastlane/Fastfile](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/fastlane/Fastfile)\n\nThis sample Fastfile contains two lanes, which are actions fastlane can execute. The lanes in this file are `build` and `beta`. \n\n### Build\nThe build lane performs just a couple of actions: `setup_ci`, `match`, and `build_app`. It uses the development certificate we generated with fastlane Match earlier to build and sign the app for development. \n\n### Beta\nThe beta lane takes a few more steps: `setup_ci`, `match`, `app_store_connect_api_key`, `increment_build_number`, `build_app`, and `upload_to_testflight`. This lane uses the appstore certificates we generated with fastlane Match earlier to build and sign the app for an appstore release. It also uses the App Store Connect integration to determine the next build number to use, and to upload the final build to TestFlight. \n\n### .gitlab-ci.yml\nWith the fastlane configuration ready to go, the last step is to hook it up to GitLab CI. \n\nFrom the [sample project](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo), copy the contents of the `.gitlab-ci.yml` file and paste it into the project. \n\n[https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/.gitlab-ci.yml](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/.gitlab-ci.yml)\n\nThis is a simplified CI configuration that creates two CI jobs to run each of the lanes in fastlane on the GitLab macOS shared runners. The build job will run for all CI pipelines, and the beta job will run only on CI pipelines on the master branch. The beta job is also manually triggered, so you can control when the beta release is pushed to TestFlight. \n\nWith all of this in place, commit all of these changes and push them up to your project. The CI pipeline will kick off, and you can see these jobs in action.
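\n\nTo make that shape concrete, here is a minimal sketch of such a configuration (not the sample project's actual file; the image and tag are assumptions based on GitLab's SaaS macOS runner docs at the time, so verify them before use):\n\n```yaml\nstages: [build, beta]\n\ndefault:\n  image: macos-12-xcode-14       # assumption: a GitLab SaaS macOS runner image\n  tags: [saas-macos-medium-m1]   # assumption: the SaaS macOS runner tag\n  before_script:\n    - bundle install\n\nbuild:\n  stage: build\n  script: bundle exec fastlane build\n\nbeta:\n  stage: beta\n  script: bundle exec fastlane beta\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"master\"\n      when: manual   # push to TestFlight only on demand\n```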
\n\nCover image by \u003Ca href=\"https://unsplash.com/@john_cameron?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">John Cameron\u003C/a> on \u003Ca href=\"https://unsplash.com/photos/DgRb7aAGK4k?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n{: .note}\n",[894,110,726],{"slug":1685,"featured":6,"template":678},"ios-cicd-with-gitlab","content:en-us:blog:ios-cicd-with-gitlab.yml","Ios Cicd With Gitlab","en-us/blog/ios-cicd-with-gitlab.yml","en-us/blog/ios-cicd-with-gitlab",{"_path":1691,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1692,"content":1698,"config":1705,"_id":1707,"_type":16,"title":1708,"_source":17,"_file":1709,"_stem":1710,"_extension":20},"/en-us/blog/eks-fargate-runner",{"title":1693,"description":1694,"ogTitle":1693,"ogDescription":1694,"noIndex":6,"ogImage":1695,"ogUrl":1696,"ogSiteName":692,"ogType":693,"canonicalUrls":1696,"schema":1697},"Setting up GitLab EKS Fargate Runners in just one hour","This detailed tutorial answers the question of how to leverage Amazon's AWS Fargate container technology for GitLab Runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663373/Blog/Hero%20Images/jeremy-lapak-CVvFVQ_-oUg-700unsplash.jpg","https://about.gitlab.com/blog/eks-fargate-runner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-05-24\",\n      }",{"title":1699,"description":1694,"authors":1700,"heroImage":1695,"date":1702,"body":1703,"category":14,"tags":1704},"Get started with GitLab EKS Fargate Runners in 1 hour and zero code, Iteration 1",[1701],"Darwin Sanoy","2023-05-24","\nLeveraging Amazon's AWS Fargate container technology for [GitLab Runners](https://docs.gitlab.com/runner/) has been a longstanding ask from our customers. This tutorial gets you up and running with the GitLab EKS Fargate Runner combo in less than an hour.\n\nGitLab has a pattern for this task for [Fargate](https://docs.aws.amazon.com/AmazonECS/latest/userguide/what-is-fargate.html) runners under AWS Elastic Container Service (ECS). The primary challenge with this solution is that AWS ECS itself does not allow overriding the image used when calling an ECS task. Therefore, each GitLab Runner manager ignores the `.gitlab-ci.yml` `image:` tag and runs on the image preconfigured in the task during deployment of the runner manager. As a result, you'll end up creating runner container images that contain every dependency for all the software built by the runner, or you'll create a lot of runner managers per image — or both.\n\nI have long wondered if Fargate-backed Elastic Kubernetes Service (EKS) could get around this limitation since, by nature, Kubernetes must be able to run any image given to it.\n\n## The approach\n\nNothing takes the joy out of learning faster than a lot of complex setup before being able to get to the point of the exercise. To address this, the tutorial uses four things to dramatically reduce the time and steps required to get from zero to hero.\n\n1. AWS CloudShell to minimize the EKS admin tooling setup. This also leaves your local machine environment untouched so that other tooling configurations don't get modified.\n2. 
A project called **AWS CloudShell “Run From Web” Configuration Scripts** to rapidly add additional tooling to CloudShell. This includes some hacks to get large Terraform templates to work on AWS CloudShell.\n3. EKS Blueprints — specifically, a Terraform example that implements both the [Karpenter autoscaler](https://aws.amazon.com/blogs/aws/introducing-karpenter-an-open-source-high-performance-kubernetes-cluster-autoscaler/) and Fargate, including for the kube-system namespace.\n4. A simple Helm install for GitLab Runner.\n\nAlthough you will be running CLI commands and editing config files, no coding is required in the sense that you won't have to build something complex from scratch and then maintain it yourself.\n\n## The results\n\nIt works! It can run 2 x 200 (the max allowed per job) parallel “Hello, World” jobs on AWS Fargate-backed EKS in about 4 minutes, which demonstrates the scalability on offer. It can also run a simple Auto DevOps pipeline, which proves out the ability to run a bunch of different containers.\n\nThe fact that the entire cluster - including kube-system - is Fargate-backed reduces the Kubernetes-specific, long-term SRE work to something approaching that of ECS Fargate clusters. Later on, we discuss the cost of this trade-off and how it can be reconfigured.\n\n## What makes it possible: Product-managed IaC that is an extensible framework\n\nToolkitting made up of Infrastructure as Code (IaC) is frequently referred to as “templates,” and these templates have a reputation of not aging well because there is no active stewardship of the codebase — they are thought of as a one-and-done effort. However, this term does not reflect reality well when the underlying IaC code is actually being product-managed. You can tell if something is being product-managed by looking for these markers:\n\n- It has a scope-bounded vision of what it wants to do for the community being served (customer).\n- It has active stewardship that keeps the codebase moving along, even if it is open source.\n- It seeks to incorporate strategic enhancements, a.k.a. new features.\n- Things that are broken are considered bugs and are actively eliminated.\n- There is a cadence for taking underlying version updates and for supporting new versions of the primary things they deploy.\n\nAs an extensible framework, EKS Blueprints:\n\n- Are purposefully architected to be extended by anyone.\n- Already have many extensions built.\n\nWhen implementing with EKS Blueprints and you come upon a new need, it is important to check whether EKS Blueprints already handles that consideration - similar to how you would look for Ruby gems, npm modules, or Python PyPI packages before building functionality from scratch.\n\nAll of the above are aspects of how the AWS EKS team is product-managing EKS Blueprints. They deserve a big round of applause because product-managing anything to prevent it from becoming yet another community-maintained shelfware project is a strong commitment that requires tenacity!\n\n## Reproducing the experiment\n\n### 1. Set up AWS CloudShell\n\n> **Note:** If you already have a fully persistent environment (like your laptop) set up with the AWS CLI, kubectl, and Terraform, you can avoid environment rebuilds when AWS CloudShell times out by using that instead.\n\nAWS CloudShell comes with kubectl, Git, and the AWS CLI, which are all needed. However, we also need a few other scripts. 
More information about these scripts can be read in [my blog post on AWS CloudShell “Run From Web” Configuration Scripts](https://missionimpossiblecode.io/aws-cloudshell-run-from-web-configuration-scripts).\n\n> **Note:** The steps in this section, up through the `git clone` from GitLab step (the second clone operation) in the next section, can be accomplished by running this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s} -o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}*`.\n\n1. Use the web console to log in to an AWS account where you have admin permissions.\n2. Switch to the region of your choosing.\n3. In the bottom left of the console, click the “CloudShell” icon.\n4. Copy and paste the following one-liner into the console to install Helm, Terraform, and the Nano text editor:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh -o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n5. Since our Terraform template will grow larger than the 1GB limit of space in the $HOME directory, we need a workaround: use the template in another directory, but store the Terraform state in $HOME, where it will be kept for up to 120 days. The following one-liner triggers a script that performs that setup for us, after which we can use the /terraform directory for our template:\n   `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh -o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash $HOME/prep-for-terraform.sh`\n\n### 2. Run Terraform EKS Blueprint\n\n> **Note:** If at any time you leave your AWS CloudShell long enough for your session to end, the /terraform directory will be tossed. Simply run the last script above and the first four steps below to make it operable again. This will most likely be necessary when it comes time to tear down the Terraform-created AWS resources.\n>\n> Sometimes your AWS CloudShell credentials may expire with a message like: `Error: Kubernetes cluster unreachable: Get \"\u003CCLUSTER URL>\": getting credentials: exec: executable aws failed with exit code 255`. Simply refresh the entire browser tab where AWS CloudShell is running and you’ll generally have new credentials.\n\n#### Version safety\n\nThis tutorial uses a specific release of the EKS Blueprints project so that you have the known state at the time of publishing. The project version also cascades into the versions of all the many dependent modules. While it may also work with the latest version, the version at the time of writing was Version 4.29.0.\n\nThis tutorial also uses Terraform binary Version 1.4.5.\n\n#### Procedures\n\nIf, while using AWS CloudShell, you experience this error: `Error: configuring Terraform AWS Provider: no valid credential sources for Terraform AWS Provider found`, you will need to refresh your browser to update the cached credentials in the terminal session.\n\nPerform the following commands in the AWS CloudShell session:\n\n1. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git --no-checkout /terraform/terraform-aws-eks-blueprints` \n2. `cd /terraform/terraform-aws-eks-blueprints/`\n3. `git reset --hard tags/v4.29.0` # Version pegging to the code that this article was authored with.\n4. 
`git clone https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   **Note:** Like other EKS Blueprints examples, the GitLab EKS Fargate Runner example references EKS Blueprints modules with a relative directory reference. This is why we are cloning it into a subdirectory of the EKS Blueprints project.\n5. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n6. `terraform init`\n\n   **Important**: If you are using AWS CloudShell and your session times out, the /terraform folder and the installed utilities will be gone. You would have to reproduce the above steps to get the Terraform template in a usable state again. This is most likely to happen when you go to use Terraform to delete the stack after playing with it for some days.\n\n   The next few instructions are from: **https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-deploy**. Note that the `-state` switch ensures our state is in persistent storage.\n7. `terraform apply -target module.vpc -state=$HOME/tfstate/runner.tfstate`\n8. `terraform apply -target module.eks -state=$HOME/tfstate/runner.tfstate`\n9. **Note:** If you receive `Error: The configmap \"aws-auth\" does not exist`, re-run the same command - it will usually update successfully.\n10. `terraform apply -state=$HOME/tfstate/runner.tfstate`\n\nThe previous command will output a kubeconfig command that needs to be run to ensure subsequent kubectl commands work. Run that command. If you are in AWS CloudShell and did not copy the command, this command should work and map to the correct region:\n    `aws eks update-kubeconfig --region $AWS_DEFAULT_REGION --name \"glrunner\"`\n\nIf everything was done correctly, you will have an EKS cluster named `glrunner` visible in the web console for the CloudShell region, like this:\n\n![EKS cluster in the console](https://about.gitlab.com/images/blogimages/eks-fargate-runner/eksclusterinconsole.png)  \n\nAnd the output of this console command `kubectl get pods -A` will look like this:\n\n![Cluster pods list](https://about.gitlab.com/images/blogimages/eks-fargate-runner/cliplaincluster.png)\n\nThe output of this console command `kubectl get nodes -A` will show the Fargate prefix:\n\n![Nodes showing the Fargate prefix](https://about.gitlab.com/images/blogimages/eks-fargate-runner/clinodesarefargate.png)\n\n> **Note:** Notice that all the EKS extras (coredns, ebs-cni, and Karpenter itself) are also running on Fargate. If you are willing to tolerate some regular Kubernetes nodes, you may be able to save cost by running always-on pods on regular Kubernetes hosts. Since this cluster runs Karpenter, you will not need to manually scale those hosts, and EKS makes control plane and node updates easier.\n\n### 3. Install GitLab Runner\n\nThese and other commands are available in the GitLab documentation for the [GitLab Runner Helm Chart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\n1. Create an empty GitLab project.\n2. Retrieve a GitLab Runner token from the project. Keep in mind that using a project token is the easiest way to ensure your experiment runs only on the EKS Fargate Runner. Using a group token may cause your job to run on other runners already set up at your company. You can follow [“Obtain a token”](https://docs.gitlab.com/runner/register/#requirements) from the documentation if you need to.\n3. Perform the following commands back in the AWS CloudShell session.\n4. 
`nano runnerregistration.yaml`\n5. Paste the following:\n\n   ```yaml\n   gitlabUrl: https://_YOUR_GITLAB_URL_HERE_.com\n   runnerRegistrationToken: _YOUR_GITLAB_RUNNER_TOKEN_HERE_\n   concurrent: 200\n   rbac:\n     create: true\n   runners:\n     tags: eks-fargate\n     runUntagged: true\n     imagePullPolicy: if-not-present\n   envVars:\n     - name: KUBERNETES_POLL_TIMEOUT\n       value: 90\n   ```\n\n   **Note:** Many more settings are discussed in the documentation for the [Kubernetes Executor](https://docs.gitlab.com/runner/executors/kubernetes.html).\n\n   **Hard Lesson:** Using a `concurrent` setting that is lower than the `parallel` setting in the GitLab job below results in all kinds of failures because some job pods have to wait for an execution slot. Since it’s Fargate, there are no savings in keeping it lower and no downside to matching it to the full parallel amount.\n\n6. Replace \\_YOUR_GITLAB_URL_HERE_ with your actual GitLab URL.\n7. Replace \\_YOUR_GITLAB_RUNNER_TOKEN_HERE_ with your actual runner token.\n8. Press CTRL-X to exit and press Y at the save prompt.\n9. `helm repo add gitlab https://charts.gitlab.io`\n10. `helm repo update gitlab`\n11. `helm install --namespace gitlab-runner --create-namespace runner1 -f runnerregistration.yaml gitlab/gitlab-runner`\n12. Wait for a few minutes and check the project’s list of runners for a new one with the tag `eks-fargate`.\n\nIn AWS CloudShell, the command `kubectl get pods -n gitlab-runner` should produce output similar to this:\n\n![Runner pods in the gitlab-runner namespace](https://about.gitlab.com/images/blogimages/eks-fargate-runner/runnerlist.png)\n\nAnd in the GitLab runner list, it will look similar to this:\n\n![The new runner in the GitLab runner list](https://about.gitlab.com/images/blogimages/eks-fargate-runner/glrunnerlist.png)\n\n### 4. Run a test job\n\nThe simplest way to test GitLab Runner scaling is to use the `parallel:` keyword to schedule multiple copies of a job. It can also be used to create a job matrix where not all jobs do the same thing (see the sketch at the end of this section).\n\nOne or more GitLab Runner Helm deployments can live in any namespace, so you have many-to-many mapping flexibility for how you think of runners and their Kubernetes context.\n\nIn the GitLab project where you created the runner, use the Web IDE to create `.gitlab-ci.yml` and populate it with the following content:\n   ```yaml\n   parallel-fargate-hello-world:\n     image: public.ecr.aws/docker/library/bash\n     stage: build\n     parallel: 200\n     script:\n       - echo \"Hello Fargate World\"\n   ```\n\n**Hard Lesson:** After hitting the Docker Hub image pull rate limit, I shifted to the same container in the AWS public Elastic Container Registry (ECR), which has an [image pull rate limit](https://docs.aws.amazon.com/AmazonECR/latest/public/public-service-quotas.html) of 10 per second for this scenario.\n\nIf the job does not automatically start, use the pipeline page to force it to run.\n\nIf everything is configured correctly, your final pipeline status panel should look something like this:\n\n![Completed parallel jobs in the pipeline view](https://about.gitlab.com/images/blogimages/eks-fargate-runner/completedjobs.png)
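\n\nThe `parallel:matrix` variant mentioned above fans a single job definition out over combinations of variables instead of identical copies. Here is a minimal sketch; the job name and variables are hypothetical and not part of the original experiment:\n\n```yaml\nmatrix-fargate-hello:\n  image: public.ecr.aws/docker/library/bash\n  stage: build\n  parallel:\n    matrix:\n      - GREETING: [\"Hello\", \"Howdy\"]\n        TARGET: [\"Fargate\", \"EKS\"]\n  script:\n    - echo \"$GREETING, $TARGET World\"\n```\n\nThis expands into one job per GREETING/TARGET combination (four jobs here), which is useful when the parallel copies should not all do exactly the same thing.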
\n\n### 5. Runner scaling experimentation\n\nThese and other commands are available in the GitLab documentation for the [GitLab Runner Helm Chart](https://docs.gitlab.com/runner/install/kubernetes.html#additional-configuration).\n\nAdditional runners can be added by re-running the install command with a different runner name (if you use the same token, you’ll have two runners in the same group or project):\n\n`helm install --namespace gitlab-runner runner2 -f runnerregistration.yaml gitlab/gitlab-runner`\n\nRunning 200 jobs takes just under 2 minutes.\n\n#### 400 parallel jobs\n\nBy setting up a second identical job (with a unique job name), I was able to process 400 total jobs.\n\n**Hard Lesson:** The runner likes to schedule all jobs in a parallel job on the same runner instance. It does not seem to want to split a large job across multiple runners registered in the same project. So, in order to get more than 200 jobs to process, I had to have two registered runners set to `concurrent: 200` and two separate jobs set to `parallel: 200`.
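\n\nOne way to express that split in a single pipeline is to give each Helm release its own runner tag and route one job to each. The sketch below assumes the two releases were registered with the hypothetical tags `eks-fargate-1` and `eks-fargate-2` (set via `runners.tags` in each values file) rather than the shared `eks-fargate` tag used earlier:\n\n```yaml\nhello-runner1:\n  image: public.ecr.aws/docker/library/bash\n  stage: build\n  tags: [eks-fargate-1]   # hypothetical tag for the runner1 Helm release\n  parallel: 200\n  script:\n    - echo \"Hello from runner 1\"\n\nhello-runner2:\n  image: public.ecr.aws/docker/library/bash\n  stage: build\n  tags: [eks-fargate-2]   # hypothetical tag for the runner2 Helm release\n  parallel: 200\n  script:\n    - echo \"Hello from runner 2\"\n```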
\n\nRunning 400 jobs takes just over 3 minutes.\n\n#### More than 400 parallel jobs\n\nAs I tried to scale higher, jobs started to hang. I tried specifically routing jobs to five runners, each capable of 300 parallel jobs. I also tried multiple stages and used a hack of `needs: []` to get simultaneous execution of jobs in multiple stages.\n\nI was not successful, and there could be a wide variety of reasons why — a riddle for a future iteration.\n\nThis command can be used to update a runner's settings after editing the Helm values file (including the token, to move the runner to another context):\n\n`helm upgrade --namespace gitlab-runner -f runnerregistration.yaml runner2 gitlab/gitlab-runner`\n\nI found that when I pushed the limits, I would sometimes end up with hung pods until I understood what needed adjusting. Leaving hung Fargate pods around adds up to a lot of cash because the pricing assumes very short execution times. This command helps you terminate job pods without accidentally terminating the runner manager pods:\n\n`kubectl get pods --all-namespaces --no-headers | awk '{if ($2 ~ \"_YOUR_JOB_POD_PREFACE_*\") print $2}' | xargs kubectl -n _YOUR_RUNNER_NAMESPACE_ delete pod`\n\nDon't forget to replace \\_YOUR_RUNNER_NAMESPACE_ and \\_YOUR_JOB_POD_PREFACE_. “_YOUR_JOB_POD_PREFACE\\_” is the unique prefix of ONLY the job pods from a given runner, followed by the wildcard star character (\\*).\n\nTo uninstall a runner, use:\n\n`helm delete --namespace gitlab-runner runner1`\n\n#### Testing Auto DevOps to prove the `image:` tag is honored\n\nTechnically, this test isn’t entirely necessary, since the job above already loads the bash container without that container being specified anywhere in the runner or infrastructure setup. However, I performed it as a litmus test anyway.\n\nFollow these steps:\n\n1. Create a new project by clicking the “+” sign in the top bar of GitLab.\n2. On the next page, select “New Project/Repository”.\n3. Then “Create from template”.\n4. Select “Ruby on Rails” (first choice).\n5. Once the project creation is complete, register an EKS runner to it (or re-register the existing runner to the new project).\n6. In the project, select “Settings (Gear Icon)” => “CI/CD” => Auto DevOps => Default to Auto DevOps pipeline.\n7. Click “Save changes”.\n\nThe Auto DevOps pipeline should run. If you don’t have a cluster wired up, it will mainly do security scanning, which is sufficient to prove that arbitrary containers can be used by the Fargate-backed GitLab Runner.\n\n### 6. Solution tuning via extensible platform\n\nEKS Blueprints is not only product-managed, it is also an extensible platform, or framework. In the spirit of fully leveraging the extensible, product-managed EKS Blueprints project, you will always want to check whether Blueprints is already instrumented for your scenario before writing code. Additionally, if you must write code, consider contributing it as an EKS Blueprints extension so the community can take on some responsibility for maintaining it.\n\n1. The EKS Blueprints managed IaC has a dizzying number of tuning parameters and optional extensions. For instance, if you want the full GitLab Runner logs collected in AWS CloudWatch, it is a simple configuration change to add a log agent that pushes custom logs to CloudWatch (see the sketch below).\n2. Using Fargate for always-on containers trades higher compute costs for the removal of Kubernetes node management overhead. This trade-off can be easily reversed in this example by removing \"kube-system\" from \"fargate_profiles\" - since Karpenter is also installed and configured, the hosts will autoscale for load.
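\n\nFor the CloudWatch idea in item 1, one documented AWS pattern (an assumption on my part, not something this tutorial's Terraform configures) is EKS Fargate's built-in Fluent Bit log router, which reads a ConfigMap named `aws-logging` in the `aws-observability` namespace. A minimal sketch, with a placeholder region and log group, and assuming the Fargate pod execution role has CloudWatch Logs permissions:\n\n```yaml\n# Namespace that enables the Fargate log router\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: aws-observability\n  labels:\n    aws-observability: enabled\n---\n# Fluent Bit output configuration read by the log router\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: aws-logging\n  namespace: aws-observability\ndata:\n  output.conf: |\n    [OUTPUT]\n        Name cloudwatch_logs\n        Match *\n        region us-east-1\n        log_group_name gitlab-runner-fargate\n        log_stream_prefix runner-\n        auto_create_group true\n```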
\n\n### 7. Teardown\n\nThe next few instructions are from https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/karpenter/README.md#user-content-destroy.\n\nIf you are using AWS CloudShell and the /terraform directory no longer exists, perform these steps to re-prepare AWS CloudShell for teardown.\n\nIf you are not using AWS CloudShell, skip forward to “Teardown steps”.\n\n1. `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/add-all.sh -o $HOME/add-all.sh; chmod +x $HOME/add-all.sh; bash $HOME/add-all.sh`\n2. `curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/prep-for-terraform.sh -o $HOME/prep-for-terraform.sh; chmod +x $HOME/prep-for-terraform.sh; bash $HOME/prep-for-terraform.sh`\n3. `git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git --no-checkout /terraform/terraform-aws-eks-blueprints`\n4. `cd /terraform/terraform-aws-eks-blueprints/`\n5. `git reset --hard tags/v4.29.0`\n6. `git clone https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate.git /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n\n   > **Note:** The above steps can be accomplished by running this: `s=prep-eksblueprint-karpenter.sh ; curl -sSL https://gitlab.com/guided-explorations/aws/aws-cloudshell-configs/-/raw/main/${s} -o /tmp/${s}; chmod +x /tmp/${s}; bash /tmp/${s}`.\n\n7. `cd /terraform/terraform-aws-eks-blueprints/examples/glrunner`\n8. `terraform init`\n\nFollow these teardown steps:\n\n1. `helm delete --namespace gitlab-runner runner1`\n2. `helm delete --namespace gitlab-runner runner2`\n3. `terraform destroy -target=\"module.eks_blueprints_kubernetes_addons\" -auto-approve -state=$HOME/tfstate/runner.tfstate`\n4. `terraform destroy -target=\"module.eks\" -auto-approve -state=$HOME/tfstate/runner.tfstate`\n5. **Note:** If you receive an error about refreshing cached credentials, simply re-run the command and it will usually update successfully.\n6. `terraform destroy -auto-approve -state=$HOME/tfstate/runner.tfstate`\n\n### Iteration _n_ : We would love your input\n\nThis blog is \"Iteration 1\" precisely because it has not been production load-tested nor specifically cost-engineered. And obviously a “Hello, World” script is not testing much in the way of real work. I really set out to understand whether we could run arbitrary containers in a GitLab Fargate setup (we can), and then got curious about what parallel job scaling might look like with Fargate (it looks good). The Kubernetes Runner executor has many, many available customizations, and it is likely that scaling a production-loaded implementation on EKS will reveal the need to tune more of these parameters.\n\n#### **Collaborative contribution challenges**\n\nHere are some ideas for further collaborative work on this project:\n\n- To push the limits, create a configuration that can scale to 1,000 simultaneous jobs.\n- An aws-logging config map that uploads runner pod logs to AWS CloudWatch.\n- A cluster configuration where runner managers and everything that is not a runner job run on non-Fargate nodes – if and only if it will be cheaper than Fargate running 24 x 7.\n- A Fargate Spot configuration. It’s important that the compute type be noted as a runner tag, and it’s important that the same cluster has non-Spot instances, because some jobs should not run on Spot compute and that decision should be available to the GitLab CI developer who is creating a pipeline.\n\n#### Other runner scaling initiatives\n\nWhile GitLab is building the Next Runner Auto-scaling Architecture, [Kubernetes refinements are not a part of this architectural initiative](https://docs.gitlab.com/ee/architecture/blueprints/runner_scaling/#proposal).\n\n#### Everyone can contribute\n\nThis tutorial, as well as code for additional examples, will be maintained as open source as a GitLab Alliances Solution, and we’d love to have your contributions as you iterate and discover the configurations necessary for your real-world scenarios. This tutorial is in a group wiki, and the code will be in the projects under that group here: [AWS Guided Explorations for EKS Runner Configurations](https://gitlab.com/guided-explorations/aws/eks-runner-configs/gitlab-runner-eks-fargate/-/blob/main/README.md). 
\n\nPhoto by [Jeremy Lapak](https://unsplash.com/@jeremy_justin?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/runner?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[894,750,771],{"slug":1706,"featured":6,"template":678},"eks-fargate-runner","content:en-us:blog:eks-fargate-runner.yml","Eks Fargate Runner","en-us/blog/eks-fargate-runner.yml","en-us/blog/eks-fargate-runner",{"_path":1712,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1713,"content":1719,"config":1725,"_id":1727,"_type":16,"title":1728,"_source":17,"_file":1729,"_stem":1730,"_extension":20},"/en-us/blog/how-to-harden-your-self-managed-gitlab-instance",{"title":1714,"description":1715,"ogTitle":1714,"ogDescription":1715,"noIndex":6,"ogImage":1716,"ogUrl":1717,"ogSiteName":692,"ogType":693,"canonicalUrls":1717,"schema":1718},"How to harden your self-managed GitLab instance","Learn seven easy steps to ensure your self-managed GitLab instance is as secure as possible.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664923/Blog/Hero%20Images/security-checklist.png","https://about.gitlab.com/blog/how-to-harden-your-self-managed-gitlab-instance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to harden your self-managed GitLab instance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ayoub Fandi\"}],\n        \"datePublished\": \"2023-05-23\",\n      }",{"title":1714,"description":1715,"authors":1720,"heroImage":1716,"date":1722,"body":1723,"category":14,"tags":1724},[1721],"Ayoub Fandi","2023-05-23","\n\"You are as secure as your weakest link\" is a well-understood phrase. If attackers find their way in, they will exploit any gaps in your security configurations. Hardening — the process of turning off unused features and making adjustments to settings that have security implications — is critical to limit your attack surface and reduce potential attack vectors.\n\nHardening ensures that your application (in this case GitLab) is as secure as it can be. The goal is simple: minimize risk while still preserving enough functionality for users to remain as productive as possible.\n\n## Guiding principles\nThese principles should guide the way you approach hardening. The security activities in the checklist below will tie back to one or another of these principles.\n\n### Layered security\nThe idea behind this is simple. If there are two ways to implement security, both ways should be implemented instead of just one. You can try to combine as many methods as possible. \n\nFor instance, if you are trying to secure access to your service, you could combine a complex password with hardware tokens and multifactor authentication. This approach is also called defense-in-depth.\n\n### No security through obscurity\nThe idea of hiding things works in many cases, but not so in the information security world. The premise that if something is hidden then it is more secure isn’t a viable approach today. \n\nCurrent scanning capabilities available to attackers cut right through obscurity. It is very easy for anyone to scan for open ports on a system. If you’ve moved SSH from TCP port 22 to a different port, the new port would still be picked up by a network scanning tool such as Nmap. \n\nAs GitLab is committed to transparency and open source, our approach is at odds with security through obscurity. 
The goal is to remove any reliance on obscurity: our documentation is available to all, and security best practices are clearly labeled and detailed.\n\n### Reducing the attack surface\nGitLab comprises numerous components, services, and dependencies. A critical aspect of security: the more components you have, the more entryways attackers have.\n\nA good rule to keep in mind is to always disable services that you do not need to run the application. If there are features that aren’t used, disabling the related services will reduce the potential attack surface and make you more secure.\n\n## 7 steps to secure your self-managed instance\nLet’s go through seven easy steps to quickly harden your self-managed instance. These quick wins are great first steps towards securing your installation. Of course, refer to the [documentation](https://docs.gitlab.com/ee/security/) for additional details and further guidance on each section.\n\n### 1. Enable multi-factor authentication\n**Admin > Settings > General > Sign-in restrictions**\n\nEnsure that the checkbox next to _Two-factor authentication_ (2FA) is **checked**. The default setting for _Two-factor grace period_ is 48 hours. Adjust it to a lower value, such as **8 hours**.\n\nEnsure the checkbox next to _Enable admin mode_ is **checked** so that _Admin Mode_ is **active**. Users with Admin access will have to use additional authentication to perform administrative tasks. With 2FA enabled, this will require additional 2FA authentication by the user.\n\nFor more detailed information, refer to the documentation on [sign-in restrictions](https://docs.gitlab.com/ee/administration/settings/sign_in_restrictions.html).\n\n### 2. Enforce additional sign-up checks\n**Admin > Settings > General > Sign-up restrictions**\n\nNext to _Sign-up enabled_, ensure the checkbox is **unchecked**.\n\nUnder _Email confirmation settings_, ensure that **Hard** is selected. This will require the user to verify their email address during the sign-up process before their account is allowed access.\n\nThe _Minimum password length (number of characters)_ default setting of 12 characters is fine if additional authentication techniques are enforced. Options available for password complexity include _Require numbers_, _Require uppercase letters_, _Require lowercase letters_, and _Require symbols_. Check these boxes depending on your internal password standard (also check out [NIST SP 800-63B](https://pages.nist.gov/800-63-3/sp800-63b.html)).\n\nIf all users' email addresses are under a single domain (e.g., example.com), consider **adding it** to the _Allowed domains for sign-ups_. This will prevent those with email addresses associated with other domains from signing up.\n\nFor more detailed information, refer to the documentation on [sign-up restrictions](https://docs.gitlab.com/ee/administration/settings/sign_up_restrictions.html).\n\n### 3. Limit public visibility of your groups and projects\n**Admin > Settings > General > Visibility and access control**\n\nThe _Default project visibility_ and _Default group visibility_ for any newly created project or group should be set to **Private** by default. Only users who are granted specific access to a project or group will be able to access these resources. This can be adjusted later if necessary or when creating a new project or group. 
This secure default mode prevents accidental disclosure of information.\n\nFor more details on visibility and access control, [refer to the documentation](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html).\n\n### 4. Harden your SSH settings\n**Admin > Settings > General > Visibility and access control**\n\nTypically, _Enabled Git access protocols_ will be set to _Both SSH and HTTP(S)_. If one of the Git protocols is not in use by your users, set it to **either** _Only SSH_ or _Only HTTP(S)_ accordingly. This will reduce the attack surface by limiting possibilities of compromise through an unused protocol.\n\nFor SSH key types, the recommended algorithms, in order of preference, are:\n1. ED25519\n1. RSA\n1. ECDSA\n\nWhen configuring default types and lengths for SSH keys, keep in mind the list above.\n\nSpecific details can be found [here for SSH settings](https://docs.gitlab.com/ee/security/ssh_keys_restrictions.html) and [here for Git access protocols](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html#configure-enabled-git-access-protocols).\n\n### 5. Review the account and limit settings\n**Admin > Settings > General > Account and limit settings**\n\nThis section allows you to limit the size of attachments, pushes, exports, imports, or repositories. As the specific size (in MB) will be tailored to your needs, review these settings and **set limits** in line with your internal policies.\n\n**Session duration for users** (in minutes) and **lifetime of SSH keys and all access tokens** (in days) can also be configured. Ensure the durations are in accordance with your internal policies and security best practices.\n\nReview the [documentation](https://docs.gitlab.com/ee/administration/settings/account_and_limit_settings.html) and apply changes that enforce your own policies.\n\n### 6. Secure your CI secrets\n**Admin > Settings > CI**\n\nPasswords, tokens, keys, and other secrets that require any level of protection should never be stored in plaintext. Instead, some type of **encrypted secrets management technology** should be implemented, such as GCP's Secret Manager, AWS Key Management Service (KMS), or HashiCorp Vault. For self-managed and standalone instances, HashiCorp Vault is **recommended**, and many GitLab features can take advantage of Vault, as described in the [documentation](https://docs.gitlab.com/search/?query=vault).\n\nFor external communications, ensure any connectivity with external hosts in your CI/CD process uses encrypted channels. The use of TLS 1.2 or above is highly recommended, and where possible, mutual TLS will help considerably.\n\nFor details on the use of external secrets for your CI/CD pipeline, check [here](https://docs.gitlab.com/ee/ci/secrets/) for actual examples and configuration guides.
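\n\nTo give a flavor of the native integration, here is a minimal sketch of pulling a CI/CD secret from Vault with GitLab's `secrets` keyword (Premium/Ultimate). The Vault URL, secrets engine name, and secret path are placeholders, and it assumes JWT/OIDC authentication is already configured in Vault and that `VAULT_SERVER_URL` is set as a CI/CD variable:\n\n```yaml\ndeploy:\n  stage: deploy\n  id_tokens:\n    VAULT_ID_TOKEN:\n      aud: https://vault.example.com      # placeholder audience\n  secrets:\n    DATABASE_PASSWORD:\n      vault: production/db/password@ops   # field `password` at path `production/db` in engine `ops`\n      token: $VAULT_ID_TOKEN\n  script:\n    - ./deploy.sh   # hypothetical script; DATABASE_PASSWORD holds a file path by default\n```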
\n\n### 7. Protect your pipelines for all branches\n**Admin > Settings > CI**\n\nPipelines are made up of jobs that execute steps, in stages, to automate tasks on behalf of a project's users. They are a central component of CI/CD.\n\nBy default, only the default branch gets a protected pipeline. Configure your other branches with the same level of security by following [these simple steps](https://docs.gitlab.com/ee/user/project/protected_branches.html#configure-a-protected-branch). This considerably hardens your pipelines.\n\nThe security features enabled by default on protected pipelines are listed in our [documentation](https://docs.gitlab.com/ee/ci/pipelines/#pipeline-security-on-protected-branches).\n\nOnce the pipeline has run, the code will be deployed in an environment. To limit interactions with that environment and to protect it from unauthorized users, you can set your key environments as protected.\n\nThe prerequisites and the full process are available in the [documentation](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n### Learn more \nThis is a high-level overview of the different areas to focus on when hardening your self-managed GitLab instance. A blog post can’t include every single security recommendation. That’s why we maintain detailed [security documentation](https://docs.gitlab.com/ee/security/) on how to secure your installation. \n\nPlease refer to the documentation as the single source of truth on hardening. Hopefully, with the help of the action items highlighted above, you’ll harden your self-managed GitLab instance while preserving agility and speed.\n\nIf you want to learn more about how we do security **at GitLab**, review the [security section](https://about.gitlab.com/handbook/security/) of the handbook.\n",[1307,726,725],{"slug":1726,"featured":6,"template":678},"how-to-harden-your-self-managed-gitlab-instance","content:en-us:blog:how-to-harden-your-self-managed-gitlab-instance.yml","How To Harden Your Self Managed Gitlab Instance","en-us/blog/how-to-harden-your-self-managed-gitlab-instance.yml","en-us/blog/how-to-harden-your-self-managed-gitlab-instance",{"_path":1732,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1733,"content":1739,"config":1745,"_id":1747,"_type":16,"title":1748,"_source":17,"_file":1749,"_stem":1750,"_extension":20},"/en-us/blog/overhauling-the-navigation-is-like-building-a-dream-home",{"title":1734,"description":1735,"ogTitle":1734,"ogDescription":1735,"noIndex":6,"ogImage":1736,"ogUrl":1737,"ogSiteName":692,"ogType":693,"canonicalUrls":1737,"schema":1738},"How designing platform navigation is like building a dream home","Go behind the scenes and learn how we ideated toward a new user experience for GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680235/Blog/Hero%20Images/home-improvement.jpg","https://about.gitlab.com/blog/overhauling-the-navigation-is-like-building-a-dream-home","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How designing platform navigation is like building a dream home\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Austin Regnery\"}],\n        \"datePublished\": \"2023-05-15\",\n      }",{"title":1734,"description":1735,"authors":1740,"heroImage":1736,"date":1742,"body":1743,"category":14,"tags":1744},[1741],"Austin Regnery","2023-05-15","\nDeciding on the look and feel of a new home is already challenging. The level of complexity grows even more when ideas involve altering the underlying structure itself. Every decision you make affects the feasibility of future changes. Choosing what is best can be subjective, even when rooted in the realities of physics and mathematics. Now imagine millions of people using this house, each with unique requirements. That gives you an idea of how it feels to update the navigation experience of GitLab. 
Picking a fork in the road means it won't be perfect for every user, but it needs to work well for the vast majority.\n\nWe'll share how our design process ideated through each solution to turn a vision into reality.\n\n## Dreaming of a new home\n\nYou might dream of an extra bedroom, an open kitchen, or more efficient use of space. Regardless, it all starts with a vision of what comes next. In our release post, we shared how our [North Star vision](/blog/gitlab-product-navigation/#establishing-a-north-star) drove our direction. We used these themes to focus on the meaningful foundational elements as we explored new concepts. We built our design assumptions around these themes before filling out user interface elements.\n\n### Theme 1: Minimize the feeling of being overwhelmed\n\nThe project and group left sidebars have been growing. Features were in different places and often required users to search for the page they needed. We started addressing these issues in the following ways:\n\n- Reorganize page elements into consistent collections across groups and projects to reduce confusion.\n- Start everyone with sensible defaults for a baseline that accommodates most user needs.\n- Provide customizable options in the left sidebar, which could reduce discovery time in the future.\n- Give back screen real estate so the focus remains on the page content and task.\n\n### Theme 2: Orient users across the platform\n\nIt could be difficult for a user to know where they were inside GitLab. Landmark clues like sidebars, breadcrumbs, and page titles weren't consistent and occasionally were missing. Without these wayfinding cues, jumping from one task to the next was challenging if the next thing wasn't directly in front of the user. To give a sense of place, we:\n\n- Show the pages specific to a given context by displaying all available options in the left sidebar.\n- Fix breadcrumbs at the top of the window to help users retain their context even when scrolling.\n\n### Theme 3: Allow users to pick up where they left off easily\n\nThere can be a lot going on at one time in GitLab, so it can be hard to know what to do next. It should feel natural to transition into GitLab and get started. Helping users transition means we must: \n\n- Make it clear that the homepage of GitLab can redirect anyone to their next task.\n- Keep things familiar so that anyone will feel right at home.\n- Reduce the number of page visits required to jump between tasks.\n\n## Visualizing the navigation layout \n\nIt's helpful to envision how you would use each room in a house before pulling it all together. We started by visualizing five different concepts, and each idea explores a unique design choice to address our assumptions.\n\n| Idea 1: Minimal features on display |\n| ------ |\n| There are so many features that could appear in the sidebar. How might it feel to only show a select number of them? |\n| ![Minimal features](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/minimal-features.png) |\n\n\u003Cbr>\n\n| Idea 2: Fewer collections to choose from |\n| ------ |\n| Organizing features into distinct collections mitigates growth but impacts discovery time. Would it be simpler to search through only a few options? |\n| ![Fewer collections](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/fewer-collections.png) |\n\n\u003Cbr>\n\n| Idea 3: Sidebar broken into multiple layers |\n| ------ |\n| GitLab can feel relatively flat in its structure, but it is far more complex than that. 
How might we use distinct layers in the sidebar to aid navigation around the platform? |\n| ![Sidebar layers](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/sidebar-layers.png) |\n\n\u003Cbr>\n\n| Idea 4: Breadcrumb navigation |\n| --- |\n| Breadcrumbs are a familiar pattern for distinguishing locations. If the breadcrumb were more prominent, how would it impact awareness? |\n| ![Breadcrumb navigation](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/breadcrumb-nav.png) |\n\n\u003Cbr>\n\n| Idea 5: Static navigation elements |\n| ---|\n| The sidebar has always been specific to the context. How would making the sidebar consistent across all screens impact user mental models? |\n| ![Static navigation](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/static-sidebar.png)|\n\n\u003Cbr>\n\nExploring [multiple ideas](https://gitlab.com/gitlab-org/gitlab/-/work_items/366338) let us evaluate different design decisions before settling on a final plan. We created a baseline across all the [concepts](https://gitlab.com/gitlab-org/gitlab/-/work_items/367687), exposed them to others for feedback, and tested them against a set of standardized tasks:\n\n- Task 1: Where would you go to see all issues for a project?\n- Task 2: How would you create an epic in a group?\n- Task 3: Imagine you started writing a comment and had to navigate away to address something else. Where would you go to find that comment?\n\n![Six frame panel](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/six-frame-vertical.png)\n\nTwo of the concepts felt easier to use and didn't require relearning. Both of these concepts build on familiar user flows. The other four proposals were overwhelming. The breadcrumb, collapse sidebar button, search bar, and pinned section were elements universally appreciated.\n\n## Making tough choices\n\nThese insights left us with one lingering question. Should we split the navigation across a top and left bar or move it all into one sidebar? We were down to two unique layouts but torn on which worked best. We had similar concepts with a few visual differences, but the key differentiator was the layout. So we devised a research plan with tasks specific to each user persona to observe how these participants adapted to these new concepts.\n\n|  Codename: [Element Reswizzle](https://www.figma.com/proto/PMIznpz7POtRKTiurKfZSF/Vision-for-Navigation?page-id=1475%3A60336&node-id=1624%3A78006&viewport=871%2C342%2C0.23&scaling=contain&starting-point-node-id=1624%3A78006&hotspot-hints=0&hide-ui=1)| Codename: [Super Sidebar](https://www.figma.com/proto/PMIznpz7POtRKTiurKfZSF/Vision-for-Navigation?page-id=1833%3A120024&node-id=1833%3A120025&viewport=454%2C513%2C0.25&scaling=scale-down&starting-point-node-id=1833%3A120025&hotspot-hints=0&hide-ui=1)\n| --- | --- |\n|  ![Homepage of two-story](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/two-story-home.png)| ![Homepage of ranch](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/ranch-home.png) |\n|  ![Issue list in two-story](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/two-story-issues.png) | ![Issue list in ranch](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/ranch-issues.png) |\n\nWe learned from our testing that the Super Sidebar was preferred as it allowed for more intuitive navigation between projects without losing context. 
Super Sidebar supported mature and new GitLab users better in their most common workflows. In comparison, only mature GitLab users found the Element Reswizzle easier. However, the Element Reswizzle offered elements, like icons and tooltips, that users would like to see included in Super Sidebar. \n\nWe chose to iterate and use the layout of the Super Sidebar while including useful aspects from Element Reswizzle.\n\n## Planning the big move\n\nWe had tackled the big questions around the scaffolding but hadn't solidified the specifics. We also needed to define color choices, font size and weight, spacing, alignment, icon options, organization of items, and placement of features. The finer details must work seamlessly and feel right, or the scaffolding around the product will fail to demonstrate its value due to a mediocre user experience. We created an [epic](https://gitlab.com/groups/gitlab-org/-/epics/9044) to house all the work we'd try to accomplish before overhauling the core navigation experience.\n\n## Pouring the foundation\n\nEach milestone brought new design challenges and trade-off decision-making. We knew the direction, but it was impossible to change everything in a single milestone. We wanted to ensure the navigation was functional first because the structure is what we were most concerned about getting right. \n\n![First time groups appeared in the super sidebar](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/porting-functionality.png)\n\nWe first [added critical contexts](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/111186) like your work, groups, and projects while unpacking the rest of the feature set. Focusing on the foundation first allowed us to get a sense of the flow of our new house before settling in.\n\nWe had team members start trying out an alpha experience early on. Some things might seem right in a design but feel off once in production. An example of this is how users switch contexts. Initially, we created a method built into the navigation sidebar but discovered it was confusing.\n\n> \"I wouldn't expect the rest of the nav to disappear when I uncollapse the project view. I would expect it to push the other content down rather than taking over all content in the panel.\" - [comment from our feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/403059#note_1350452712)\n\n| Before | After |\n| :---: | :---: |\n| ![Context switcher before](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/context-switcher-before.png) | ![Context switcher after](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/context-switcher-after.png) |\n\nWe pivoted by adding more layering with dropdown disclosure to give a sense of movement. It's not the perfect solution, but it's a good iteration that holds us over until we can design a better experience.\n\n## Getting ready to show\n\n![Screenshot of the new navigation](https://about.gitlab.com/images/blogimages/overhauling-the-navigation/premier.png)\n\nHoning the navigation will take numerous rounds of feedback, several more iterations, and plenty of patience. A home is never quite finished, and neither is the navigation of a platform like GitLab. 
Before the 16.0 release, we focused on squeezing in as many [UX improvements](https://gitlab.com/gitlab-org/gitlab/-/merge_requests?scope=all&state=merged&label_name%5B%5D=group%3A%3Afoundations&milestone_title=16.0&label_name%5B%5D=UX) and  [bug fixes](https://gitlab.com/gitlab-org/gitlab/-/merge_requests?label_name%5B%5D=group%3A%3Afoundations&label_name%5B%5D=type%3A%3Abug&milestone_title=16.0&scope=all&state=merged) as possible to improve upon our prior iterations. This has brought us to a navigation experience that works beautifully. We are excited to share it with everyone. We are confident that GitLab will become more usable and beloved with each new milestone. Try out the overhauled navigation experience today, and share your thoughts with us in our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/409005).",[1144,675,959],{"slug":1746,"featured":6,"template":678},"overhauling-the-navigation-is-like-building-a-dream-home","content:en-us:blog:overhauling-the-navigation-is-like-building-a-dream-home.yml","Overhauling The Navigation Is Like Building A Dream Home","en-us/blog/overhauling-the-navigation-is-like-building-a-dream-home.yml","en-us/blog/overhauling-the-navigation-is-like-building-a-dream-home",{"_path":1752,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1753,"content":1758,"config":1765,"_id":1767,"_type":16,"title":1768,"_source":17,"_file":1769,"_stem":1770,"_extension":20},"/en-us/blog/building-gitlab-with-gitlab-api-fuzzing-workflow",{"title":1754,"description":1755,"ogTitle":1754,"ogDescription":1755,"noIndex":6,"ogImage":1457,"ogUrl":1756,"ogSiteName":692,"ogType":693,"canonicalUrls":1756,"schema":1757},"Building GitLab with GitLab: Web API Fuzz Testing","Our new series shows how we dogfood new DevSecOps platform features to ready them for you. First up, security testing.","https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: Web API Fuzz Testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Eddington\"},{\"@type\":\"Person\",\"name\":\"Eugene Lim\"}],\n        \"datePublished\": \"2023-05-09\",\n      }",{"title":1754,"description":1755,"authors":1759,"heroImage":1457,"date":1762,"body":1763,"category":14,"tags":1764},[1760,1761],"Mike Eddington","Eugene Lim","2023-05-09","\n\nAt GitLab, we try to [dogfood everything](/handbook/product/product-processes/#dogfood-everything) to help us better understand the product, pain points, and configuration issues. We use what we learn to build a more efficient, feature-rich platform and user experience. In this first installment of our “Building GitLab with GitLab” series, we will focus on security testing. We constantly strive to improve our security testing coverage and integrate it into our DevSecOps lifecycle. These considerations formed the motivation for the API fuzzing dogfooding project at GitLab. By sharing our lessons from building this workflow, we hope other teams can also learn how to integrate GitLab’s Web API Fuzz Testing and solve some common challenges.\n\n## What is Web API Fuzz Testing?\n\nWeb API Fuzz Testing involves generating and sending various unexpected input parameters to a web API in an attempt to trigger unexpected behavior and errors in the API backend. 
By analyzing these errors, you can discover bugs and potential security issues missed by other scanners that focus on specific vulnerabilities. GitLab's Web API Fuzz Testing complements, and should be run in addition to, GitLab Secure’s other security scanners, such as static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) and dynamic application security testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)) APIs.\n\n## Auto-generating an OpenAPI specification\nTo run the Web API Fuzzing Analyzer, you need one of the following:\n* OpenAPI Specification - Version 2 or 3\n* GraphQL Schema\n* HTTP Archive (HAR)\n* Postman Collection - Version 2.0 or 2.1\n\nAt the start of the API fuzzing project, the [API Vision working group](/company/team/structure/working-groups/api-vision/) was also working on an issue to automatically document [GitLab’s REST API endpoints in an OpenAPI specification](https://gitlab.com/groups/gitlab-org/-/epics/8636), so we worked with our colleague Andy Soiron on implementing it. Because GitLab uses the [grape](https://github.com/ruby-grape/grape) API framework, Andy had already identified and [tested](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/95877) the [grape-swagger](https://github.com/ruby-grape/grape-swagger) gem, which auto-generates an OpenAPI v2 specification based on existing grape annotations. For example, the following API endpoint code:\n\n```ruby\n     Class.new(Grape::API) do\n       format :json\n       desc 'This gets something.'\n       get '/something' do\n         { bla: 'something' }\n       end\n       add_swagger_documentation\n     end\n```\n\nwill be parsed by grape-swagger into:\n\n```json\n{\n  // rest of OpenAPI v2 specification\n  …\n  \"paths\": {\n    \"/something\": {\n      \"get\": {\n        \"description\": \"This gets something.\",\n        \"produces\": [\n          \"application/json\"\n        ],\n        \"operationId\": \"getSomething\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"This gets something.\"\n          }\n        }\n      }\n    }\n  }\n}\n```\n\nHowever, with almost 2,000 API operations with different requirements and formats, a lot of additional work needed to be done to resolve edge cases that did not meet the requirements of grape-swagger or the OpenAPI format. For example, one simple case was API endpoints that accept file parameters, such as the [upload metric image endpoint](https://docs.gitlab.com/ee/api/issues.html#upload-metric-image). GitLab uses the [Workhorse](https://gitlab.com/gitlab-org/gitlab/tree/master/workhorse) smart reverse proxy to handle \"large\" HTTP requests such as file uploads. As such, file parameters must be of the type WorkhorseFile:\n\n```ruby\nnamespace ':id/issues/:issue_iid/metric_images' do\n            …\n            desc 'Upload a metric image for an issue' do\n              success Entities::IssuableMetricImage\n            end\n            params do\n              requires :file, type: ::API::Validations::Types::WorkhorseFile, desc: 'The image file to be uploaded'\n              optional :url, type: String, desc: 'The url to view more metric info'\n              optional :url_text, type: String, desc: 'A description of the image or URL'\n            end\n            post do\n              require_gitlab_workhorse!\n```\n\nBecause grape-swagger does not recognize what OpenAPI type WorkhorseFile corresponds to, it excludes the parameter from its output. 
We fixed this by adding grape-swagger-specific documentation to override the type during generation:\n\n```ruby\n             requires :file, type: ::API::Validations::Types::WorkhorseFile, desc: 'The image file to be uploaded', documentation: { type: 'file' }\n```\n\nHowever, not all edge cases could be resolved with a simple match-and-replace in the grape annotations. For example, Ruby on Rails supports wildcard segment parameters. A route like `get 'books/*section/:title'` would match `books/some/section/last-words-a-memoir`. In addition, the URI would be parsed such that the `section` path parameter would have the value `some/section` and the `title` path parameter would have the value `last-words-a-memoir`.\n\nCurrently, grape-swagger does not recognize these wildcard segments as path parameters. For example, the route would generate:\n\n```json\n\"paths\": {\n  \"/api/v2/books/*section/{title}\": {\n    \"get\": {\n    ...\n      \"parameters\": [\n         {\n           \"in\": \"query\", \"name\": \"*section\"\n           ...\n  }\n}\n```\n\nInstead of the expected:\n\n```json\n\"paths\": {\n  \"/api/v2/books/{section}/{title}\": {\n    \"get\": {\n    ...\n      \"parameters\": [\n         {\n           \"in\": \"path\", \"name\": \"section\"\n           ...\n  }\n}\n```\n\nAs such, we also needed to make several patches to grape-swagger, which we forked while waiting for the changes to be accepted upstream. Nevertheless, with lots of careful checking and cooperation across teams, we managed to get the OpenAPI specification generated for most of the endpoints.\n\n## Performance tuning\n\nWith the OpenAPI specification, we could now begin the API fuzzing. GitLab already uses the [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) feature to generate testing environments for some feature changes, providing a readily available fuzzing target. However, given the large number of endpoints, it would be unrealistic to expect a standard shared runner to complete fuzzing in a single job. The Web API Fuzz Testing documentation includes a [performance tuning section](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/#performance-tuning-and-testing-speed) that recommends the following:\n\n* using a multi-CPU Runner\n* excluding slow operations\n* splitting a test into multiple jobs\n* excluding operations in feature branches, but not the default branch
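\n\nTo make the last two recommendations concrete, here is a hedged sketch of what enabling the analyzer against one split of the specification can look like. The target URL, split file name, and excluded path are placeholders rather than the exact values we used:\n\n```yaml\nstages:\n  - fuzz\n\ninclude:\n  - template: API-Fuzzing.gitlab-ci.yml\n\nvariables:\n  FUZZAPI_PROFILE: Long-100                    # or Quick-10 for shorter sessions\n  FUZZAPI_OPENAPI: openapi-split-01.json       # one split of the full specification\n  FUZZAPI_TARGET_URL: https://review-app.example.com\n  FUZZAPI_EXCLUDE_PATHS: /api/v4/personal_access_tokens*   # skip operations that end the session\n```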
\n\nThe first recommendation was easy to implement with a dedicated fuzzing runner. We recommend doing this for large scheduled fuzzing workflows, especially if you select the Long-100 fuzzing profile. We also began excluding slow operations by checking the job logs for the time taken to complete each operation. Along the way, we identified other endpoints that needed to be excluded, such as the [revoke token endpoint](https://docs.gitlab.com/ee/api/personal_access_tokens.html#revoke-a-personal-access-token), which prematurely ended the fuzzing session.\n\nSplitting the test into multiple jobs took the most effort due to the requirements of the OpenAPI format. Each OpenAPI document includes a required set of objects and fields, so it is not simply a matter of splitting after a fixed number of lines. Additionally, each operation relies on entities defined in the definitions object, so we needed to ensure that, when splitting the OpenAPI specification, the entities required by the endpoints were included. We also wrote a quick script to fill the example parameter data with actual data from the testing environment, such as project IDs.\n\nWhile it was possible to run these scripts locally and then push the split jobs and OpenAPI specifications to the repository, this created a large number of changes every time we updated the original OpenAPI specification. Instead, we adapted the workflow to use dynamically generated child pipelines that split the OpenAPI document in a CI job and then generate a child pipeline with jobs for each split document. This made iterating a lot easier and more agile. We have uploaded [the scripts and pipeline configuration](https://gitlab.com/eugene_lim/api-fuzzing-dogfooding) for reference.
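\n\nThe generate-then-trigger shape of that workflow looks roughly like this; the image and script names are placeholders (the actual scripts are in the repository linked above):\n\n```yaml\ngenerate-fuzzing-pipeline:\n  stage: build\n  image: python:3.11                        # placeholder image\n  script:\n    - python split_openapi.py openapi.json --out child-pipeline.yml   # hypothetical splitter script\n  artifacts:\n    paths:\n      - child-pipeline.yml\n\nrun-fuzzing-splits:\n  stage: test\n  trigger:\n    include:\n      - artifact: child-pipeline.yml\n        job: generate-fuzzing-pipeline\n    strategy: depend                        # parent pipeline mirrors the child pipeline's status\n```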
\n\nBy tweaking the number of parallel jobs and the fuzzing profile, we were eventually able to achieve a reasonably comprehensive fuzzing session in an acceptable time frame. When tuning your own fuzzing workflow, balancing these trade-offs is essential.\n\n## Triaging the API fuzzing findings\n\nWith the fuzzing done, we were now confronted with hundreds of findings. Unlike DAST analyzers that try to detect specific vulnerabilities, Web API Fuzz Testing looks for unexpected behavior and errors that may not necessarily be vulnerabilities. This is why fuzzing faults discovered by the API Fuzzing Analyzer show up as vulnerabilities with a severity of “Unknown.” This requires more involved triaging.\n\nFortunately, the Web API fuzzer also outputs Postman collections as artifacts on the Vulnerability Report page. These collections allow you to quickly repeat requests that triggered a fault during fuzzing. For this stage of the fuzzing workflow, we recommend that you set up a local instance of the application so that you can easily check logs and debug specific faults. In this case, we ran the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit).\n\nMany of the faults occurred due to a lack of error handling for unexpected inputs. We created issues from the Vulnerability Report page, and if we found that a particular fault had the same root cause as a previously triaged fault, we linked the vulnerability to the original issue instead.\n\n## Lessons learned\n\nThe API fuzzing dogfooding project turned out to be a fruitful exercise that benefited other workstreams at GitLab, such as the API documentation project. In addition, tuning and triaging helped us identify key pain points in the process for improvement. Automated API documentation generation is difficult even with OpenAPI, particularly on a long-lived codebase. GitLab’s existing annotations and tests helped speed up documentation via a distributed, asynchronous workflow across multiple teams. In addition, many GitLab features such as Review Apps, Vulnerability Reports, and dynamically generated child pipelines helped us build a robust fuzzing workflow.\n\nThere are still many improvements that can be made to the workflow. Moving to OpenAPI v3 could improve endpoint coverage. The Secure team also wrote a [HAR Recorder](https://gitlab.com/gitlab-org/security-products/har-recorder) tool that could help generate HAR files on the fly instead of relying on static documentation. For now, due to the high compute cost of fuzzing thousands of operations in GitLab’s API, the workflow is better suited to a scheduled pipeline rather than GitLab’s core pipeline.\n\nFor teams that have already implemented several layers of static and dynamic checks and want to take further steps to increase coverage, we recommend trying a Web API fuzzing exercise as a way to validate assumptions and discover “unknown unknowns” in your code.\n\nWe encourage you to get familiar with API fuzzing and let us know how it works for you. If you face any issues or have any feedback, please file an issue in the [issue tracker on GitLab.com](https://gitlab.com/gitlab-org/gitlab/-/issues/). Use the `~\"Category:API Security\"` label when opening a new issue regarding API fuzzing to ensure it is quickly reviewed by the appropriate team members.\n",[915,1307,915,1328,726],{"slug":1766,"featured":6,"template":678},"building-gitlab-with-gitlab-api-fuzzing-workflow","content:en-us:blog:building-gitlab-with-gitlab-api-fuzzing-workflow.yml","Building Gitlab With Gitlab Api Fuzzing Workflow","en-us/blog/building-gitlab-with-gitlab-api-fuzzing-workflow.yml","en-us/blog/building-gitlab-with-gitlab-api-fuzzing-workflow",{"_path":1772,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1773,"content":1779,"config":1784,"_id":1786,"_type":16,"title":1787,"_source":17,"_file":1788,"_stem":1789,"_extension":20},"/en-us/blog/use-inputs-in-includable-files",{"title":1774,"description":1775,"ogTitle":1774,"ogDescription":1775,"noIndex":6,"ogImage":1776,"ogUrl":1777,"ogSiteName":692,"ogType":693,"canonicalUrls":1777,"schema":1778},"Define input parameters to includable CI/CD configuration files","This is the first milestone of the long-term roadmap of the CI/CD Components Catalog.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679170/Blog/Hero%20Images/migration-data.jpg","https://about.gitlab.com/blog/use-inputs-in-includable-files","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Define input parameters to includable CI/CD configuration files\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-05-08\",\n      }",{"title":1774,"description":1775,"authors":1780,"heroImage":1776,"date":1781,"body":1782,"category":14,"tags":1783},[1019],"2023-05-08","\nIn GitLab 15.11, we introduced an exciting new feature that allows users to [define input parameters for includable configuration files](/releases/2023/04/22/gitlab-15-11-released/#define-inputs-for-included-cicd-configuration). With the ability to use input parameters in [CI templates](https://docs.gitlab.com/ee/development/cicd/templates.html), you can replace any keyword in the template with a parameter, including stage, script, or job name. For example, you can add a prefix to all of the jobs to better isolate them from the pipeline into which you are including the configuration.\n\nThese input parameters can be declared as mandatory or optional for each configuration file, reducing the need for global variables and making your CI/CD templates more robust and isolated. The input parameters are scoped to the included configuration only, which means they have no impact on the rest of the pipeline. 
This allows you to declare and enforce constraints, for example by enforcing mandatory inputs for templates.\n\nThis development is the first milestone of the long-term roadmap of the [CI/CD Components Catalog](https://gitlab.com/groups/gitlab-org/-/epics/7462), a new feature that will allow users to search and reuse single-purpose CI/CD configuration units with specific parameters for their use case. If you want to learn more about this exciting new development, you can read our [blog post about our CI templates feature](/blog/how-to-build-reusable-ci-templates/).\n\nIn this technical blog post, we will provide step-by-step instructions on how to define CI/CD templates with input parameters and how to use them when including templates.\n\n## Step 1: Create a template YAML document\nThe first step is to create a template YAML document whose first part (the `spec` section) describes what input arguments can be used with the template. The second part of the template is the definition of the jobs, which may include references to input values using the interpolation format `$[[ inputs.input-name ]]`. Separate the two parts with a line of three dashes (`---`).\n\nHere is an example of a `deploy-template.yml`:\n\n```yaml\nspec:\n  inputs:\n    website:\n    environment:\n      default: test\n---\ndeploy:\n  stage: deploy\n  script: echo \"deploy $[[ inputs.website ]] to $[[ inputs.environment ]]\"\n```\n\nIn this template, we have defined two input parameters: `website` and `environment`. The `environment` parameter has a default value. In the content section, we define a job that interpolates the input arguments.\n\n## Step 2: Include the template in the CI configuration\nIn your main CI configuration file `.gitlab-ci.yml`, include the template and add input parameters using the `inputs` keyword.\n\nHere is an example of including the `deploy-template.yml` with input parameters:\n\n```yaml\ninclude:\n  - local: deploy-template.yml\n    inputs:\n      website: my-website.example.com\n```\n\nIn this example, we included a local template in our project. Note: You can use `inputs` with the other [include types](https://docs.gitlab.com/ee/ci/yaml/index.html#include), such as `include:project`, `include:template`, and `include:remote`.
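\n\nFor instance, here is a minimal sketch of pulling the same template from another project with `include:project`; the project path is hypothetical:\n\n```yaml\ninclude:\n  - project: my-group/ci-templates   # hypothetical project that holds deploy-template.yml\n    ref: main\n    file: deploy-template.yml\n    inputs:\n      website: my-website.example.com\n      environment: production\n```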
\n\nIn the example below, we use inputs to add a prefix to job names and make the stage dynamic as well.\n\n```yaml\nspec:\n  inputs:\n    website:\n    environment:\n      default: staging\n    stage:\n      default: test\n    job_prefix:\n      default: \"\"\n---\n\"$[[ inputs.job_prefix ]]deploy\":\n  stage: $[[ inputs.stage ]]\n  script: echo \"deploy $[[ inputs.website ]] to $[[ inputs.environment ]]\"\n```\n\nThen we can include it from the `.gitlab-ci.yml` with the input parameters:\n\n```yaml\ninclude:\n  - local: deploy-template.yml\n    inputs:\n      stage: deploy\n      website: http://example.com\n      environment: production\n      job_prefix: \"my-app-\"\n```\n\nYou can [fork](https://gitlab.com/tech-marketing/ci-interpolation-example) this project, which uses the above examples:\n\n- [Dynamic job](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/dynamic-job.yml)\n- [Dynamic script](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/deploy-template.yml)\n- [Main CI configuration](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/.gitlab-ci.yml)\n\nFor more information, please see our [online documentation](https://docs.gitlab.com/ee/ci/yaml/includes.html#define-input-parameters-with-specinputs).\n\nThat's it! You have successfully created CI templates that accept inputs and used them in a pipeline configuration. By using templates with inputs, you can simplify pipeline configuration and make templates more modular and reusable.\n\nThank you to [Fabio Pitino](https://gitlab.com/fabiopitino) and [Grzegorz Bizon](https://gitlab.com/grzesiek) for their content reviews.",[726,894,110],{"slug":1785,"featured":6,"template":678},"use-inputs-in-includable-files","content:en-us:blog:use-inputs-in-includable-files.yml","Use Inputs In Includable Files","en-us/blog/use-inputs-in-includable-files.yml","en-us/blog/use-inputs-in-includable-files",{"_path":1791,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1792,"content":1798,"config":1803,"_id":1805,"_type":16,"title":1806,"_source":17,"_file":1807,"_stem":1808,"_extension":20},"/en-us/blog/how-to-build-reusable-ci-templates",{"title":1793,"description":1794,"ogTitle":1793,"ogDescription":1794,"noIndex":6,"ogImage":1795,"ogUrl":1796,"ogSiteName":692,"ogType":693,"canonicalUrls":1796,"schema":1797},"How to build more reusable CI/CD templates","Users can now define inputs to any includable CI/CD template. Learn how and see what other CI/CD pipeline developments are coming.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682709/Blog/Hero%20Images/pexels-mathias-reding-4386148.jpg","https://about.gitlab.com/blog/how-to-build-reusable-ci-templates","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build more reusable CI/CD templates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2023-05-01\",\n      }",{"title":1793,"description":1794,"authors":1799,"heroImage":1795,"date":1800,"body":1801,"category":14,"tags":1802},[1020],"2023-05-01","\n\nThere are exciting new developments to share about our CI/CD templates feature, known for its ability to get users up and running quickly with [GitLab CI/CD](/topics/ci-cd/). Our goals for the immediate future are to evolve templates into CI/CD components (more details below) and, soon, to release a CI/CD components catalog to make the reusing and sharing of pipeline configurations easier and more efficient for developers, both inside of their organizations and with the wider developer community. The first step in our journey is to enable users to define inputs to any includable file, ultimately creating more powerful and reusable CI/CD templates.\n\nHere is a short walkthrough of this capability: \n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"1870\" height=\"937\" src=\"https://www.youtube.com/embed/4ZRdgBy1n5E\" title=\"\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\n\n## Build more robust and isolated CI/CD templates\nIn GitLab 15.11, we released, as Beta, the ability to define inputs to any includable file (any CI/CD file that you include in your pipeline). Until now, we've been leveraging environment variables to pass information. 
As an example, we used environment variables to pass information from an upstream pipeline to a downstream pipeline.\n\nUsing environment variables for passing information is like declaring global variables in programming languages – it has an effect on your entire pipeline, which means that the more variables we declare, the more we risk variable conflicts and increased variable scope.\n\nInput parameters are similar to variables passed to the template but exist only inside a specific scope and don't affect other templates in your pipelines. There are several benefits of using inputs, including:\n1. Inputs are not inherited from upstream includes and must be passed explicitly, which means they will never affect your entire pipeline. \n2. Inputs have full support for CI/CD interpolation, which means you have complete flexibility to \"templatize\" your pipeline and use `$[[ inputs.* ]]` across all keywords in your CI/CD configuration. \n3. You can define mandatory and optional inputs to be used as part of your CI/CD templates.\n4. You can define a default value for inputs. \n \nThis paradigm allows users to build more robust and isolated templates (which will soon evolve into components) and enables users to declare and enforce contracts. \n\n### Add your inputs and let us know what you think! \nThe ability to define inputs to a CI/CD configuration file is available right now and we'd love for users to dive in and begin adding inputs to templates. You can check out [the GitLab docs](https://docs.gitlab.com/ee/ci/yaml/includes.html#define-input-parameters-with-specinputs) and review [this example project](https://gitlab.com/grzesiek/ci-interpolation-example) to better understand how to use inputs as part of your daily workflow. If you use this feature and have feedback, please share it with us in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/407556).\n\n## What's next in CI/CD pipelines?\nIn GitLab 16.0, we are planning to release an experimental version of a CI/CD pipeline component, which will be the first building block of our CI/CD catalog. A pipeline component is a reusable, single-purpose building block that abstracts a single pipeline configuration unit away. To learn more, please check out this [example project](https://gitlab.com/gitlab-test-ci-catalog/catalog/ruby). \n\n### Why are we moving to components?\nComponents are preconfigured CI/CD files that automate the process of building, testing, and deploying software applications. CI/CD components provide:\n* **Versioning**: Each component is tagged with a version number, so you can reference a specific version or always use the `~latest` version.\n* **Consistency**: CI/CD components ensure consistency in your CI/CD pipelines across different projects, teams, and environments. By using a standardized approach, developers can reduce errors and improve the quality of their code.\n* **Time-savings**: CI/CD components save time by automating repetitive tasks such as running tests, building artifacts, and deploying applications. This enables developers to focus on more important tasks, like writing code and fixing bugs.\n* **Reusability**: CI/CD components can be reused across multiple projects and teams, eliminating the need to create custom scripts for each project. This saves time and reduces the risk of errors.\n* **Scalability**: CI/CD components are scalable and can be used to manage pipeline processes of large and complex applications. 
This enables developers to easily manage their projects as they grow.\n* **Flexibility**: CI/CD components are highly customizable and can be adapted to suit the needs of different projects, teams, and environments. This allows developers to use the tools and processes that work best for them.\n\nTL;DR: Using CI/CD components can help streamline the development process, save time, reduce errors, and improve the quality of code.\n\n### On the horizon: A CI/CD component catalog\nTo further streamline your development processes, improve the quality of your software delivery, and make it easier for developers to discover and use preconfigured components, we’ll be releasing the CI/CD component catalog later this year. The catalog will make using, creating, and sharing CI/CD components much more efficient and user-friendly. In the coming months, we’ll be sharing more feature updates, blogs, docs, and demos to keep you posted on our journey toward CI/CD components and a CI/CD component catalog. We’re excited for you to test out the new capabilities as they drop, and we look forward to your feedback. \n\nCover image by [Mathias Reding](https://www.pexels.com/@matreding/) on [Pexels](https://www.pexels.com/photo/background-of-abstract-modern-architectural-pattern-4386148/).\n{: .note}\n",[110,894],{"slug":1804,"featured":6,"template":678},"how-to-build-reusable-ci-templates","content:en-us:blog:how-to-build-reusable-ci-templates.yml","How To Build Reusable Ci Templates","en-us/blog/how-to-build-reusable-ci-templates.yml","en-us/blog/how-to-build-reusable-ci-templates",{"_path":1810,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1811,"content":1817,"config":1823,"_id":1825,"_type":16,"title":1826,"_source":17,"_file":1827,"_stem":1828,"_extension":20},"/en-us/blog/data-driven-decision-making-with-sourcewarp",{"title":1812,"description":1813,"ogTitle":1812,"ogDescription":1813,"noIndex":6,"ogImage":1814,"ogUrl":1815,"ogSiteName":692,"ogType":693,"canonicalUrls":1815,"schema":1816},"SourceWarp: Make data-driven, agile DevSecOps decisions","How the SourceWarp approach and tool help make informed, agile decisions for CI/CD tools and DevSecOps platforms at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682686/Blog/Hero%20Images/velocity2.png","https://about.gitlab.com/blog/data-driven-decision-making-with-sourcewarp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"SourceWarp: Make data-driven, agile DevSecOps decisions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Vulnerability Research Team\"}],\n        \"datePublished\": \"2023-04-13\",\n      }",{"title":1812,"description":1813,"authors":1818,"heroImage":1814,"date":1820,"body":1821,"category":14,"tags":1822},[1819],"GitLab Vulnerability Research Team","2023-04-13","\n\nAt GitLab, we use different strategies to make assessments about the stability\nor robustness of a feature by means of best practices such as staging\nenvironments, feature flags, or canary testing. We also use testing\nstrategies such as [A/B testing](/handbook/marketing/digital-experience/engineering-ab-tests/)\nto assess how users react to feature variants. \n\nHowever, our short release cycles require testing and benchmarking approaches that\nmake it possible to prototype, test, and benchmark ideas quickly (ideally while\ndeveloping them). 
We need an approach that works on large code\nbases, can help assess a feature **before** deployment to staging or\nproduction, and provides data to support data-driven decision making.\n\nTo address this need, we developed the SourceWarp tool: a record-and-replay framework\nfor source code management systems. In this blog post, we will explain our motivation\nfor creating SourceWarp and show how we use it to inform data-driven decision making within the GitLab platform.\n\n## Motivation: Data-driven decision making in the DevSecOps context\n\n[DevSecOps](/topics/devsecops/) streamlines software development by allowing teams to ship features quickly\nand providing short feedback cycles for customers. These short feedback cycles can be used to monitor the impact of\na feature from the time it is shipped and inform developers and product\nmanagers about the success or failure of a given deployment.\n\nGitLab, as a heterogeneous DevSecOps platform, acts as an integration point for\ndifferent [CI/CD tools](https://docs.gitlab.com/ee/ci/) that often contribute\nto user-facing functionality. For example, the [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/),\nwhich displays all detected vulnerabilities, is visible\nas a single functionality, but the data in the report may come from a\nnumber of different tools in various pipelines. The DevSecOps\nplatform collects and stores results in the backend database and keeps track of user actions on the\nfindings (through the UI or the API). A large portion of the automation in the platform\nis built around or initiated by code changes where the\nsource code management system or Git repository basically holds the input data. In\norder to test and benchmark new features for these systems effectively, the\ntesting and benchmarking approach needs to have some source code awareness.\n\nWe can use SourceWarp to achieve this. Let's dive into a real-world example\nof how we used SourceWarp to help make an informed decision about a product integration.\n\n## Case study: Advanced vulnerability tracking\n\nAs a DevSecOps platform, GitLab provides automation\ncentered around code changes, where the source code is stored in a source code\nmanagement system. SourceWarp uses a Git repository as input, which we use to\nsource test-input data to test and benchmark our newly developed feature.\n\nIn a record phase, SourceWarp extracts commits from the source history that are\nrelevant with respect to a given test criterion and generates a patch replay\nsequence. In the monitor phase, SourceWarp replays the generated sequence on a\ntarget system. These phases are executed while continuously monitoring the\nDevSecOps platform to collect metrics and to generate a report that provides\nthe testing and benchmarking results.\n\nWe used SourceWarp to test and benchmark [advanced vulnerability tracking](https://docs.gitlab.com/ee/user/application_security/sast/#advanced-vulnerability-tracking),\nwhich identifies and deduplicates vulnerabilities in a changing code base. 
In our\nbenchmarking and testing experiment, we let SourceWarp automatically sample patch\nsequences from a slice of GitLab's source code repository history (2020-10-31\nto 2020-12-31) and replay them on two target systems: One system had advanced\nvulnerability tracking enabled, and the other one was using our old\nvulnerability tracking approach.\n\nAfter the application of every patch from the\npatch sequence, SourceWarp collected metrics from the target system that\nrecorded the observed vulnerabilities. We observed that our vulnerability\ntracking approach was 30% more effective than traditional\nvulnerability tracking where `\u003Cfile, line number>` are used to identify the\nlocation of a vulnerability. This means that advanced vulnerability tracking\nreduces the manual effort of auditing vulnerabilities by 30%.\n\nIn addition, we\nobserved that with an increasing number of source code changes, the deduplication\neffectiveness of vulnerability tracking increases. Looking at the relatively\nshort timeframe from 2020-10-31 to 2020-12-31, the deduplication effectiveness\nincreased from 11% to 30%, which suggests that the effectiveness increases over\ntime as the source code evolves.\n\nSourceWarp performed this experiment in an automated and reproducible way, and\nprovided data that was helpful in making an informed decision about the product\nintegration of vulnerability tracking. \n\n## Where to find more SourceWarp information\n\nThe SourceWarp approach is detailed in our research paper, \"[SourceWarp](/resources/downloads/research-paper-ast2023-sourcewarp.pdf): A scalable, SCM-driven testing and benchmarking approach to support data-driven and agile decision making for CI/CD tools and DevSecOps platforms,\" which will be presented at the 4th ACM/IEEE International Conference on Automation of Software Test ([AST 2023](https://conf.researchr.org/home/ast-2023)). 
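\n\nTo make the two phases easier to picture, here is a purely illustrative sketch of the control flow described above; the names below are invented for illustration and are not part of the actual Ruby implementation:\n\n```python\n# Illustrative sketch only -- these names are not SourceWarp's real API.\n\ndef record(history, is_relevant):\n    # Record phase: extract relevant commits as a patch replay sequence.\n    return [commit.as_patch() for commit in history if is_relevant(commit)]\n\ndef replay_and_monitor(patches, target, collect_metrics):\n    # Monitor phase: replay each patch on the target system and collect metrics.\n    observations = []\n    for patch in patches:\n        target.apply(patch)  # e.g., push the change to the DevSecOps platform\n        observations.append(collect_metrics())  # e.g., currently reported vulnerabilities\n    return observations  # raw data for the final report\n```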
\n\nThe [SourceWarp testing and benchmarking tool](https://gitlab.com/gitlab-org/vulnerability-research/foss/sourcewarp) is implemented in Ruby and is open source (MIT license).\nThe `README.md` provides information about the tool setup and implementation.\nYou can also see it in action in the demo below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/-9lk_Jhuq14\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## Useful links\n\n- [Vulnerability Research Team](/handbook/engineering/development/sec/secure/vulnerability-research/)\n- [SourceWarp tool](https://gitlab.com/gitlab-org/vulnerability-research/foss/sourcewarp)\n- [Recorded Demo](https://www.youtube.com/watch?v=-9lk_Jhuq14)\n- [AST 2023](https://conf.researchr.org/home/ast-2023)\n- [Research Paper](/resources/downloads/research-paper-ast2023-sourcewarp.pdf)\n\nCover image by [Jason Corey](https://unsplash.com/@jason_corey_) on [Unsplash](https://unsplash.com/photos/AT5vuPoi8vc)\n{: .note}\n",[725,110,1307],{"slug":1824,"featured":6,"template":678},"data-driven-decision-making-with-sourcewarp","content:en-us:blog:data-driven-decision-making-with-sourcewarp.yml","Data Driven Decision Making With Sourcewarp","en-us/blog/data-driven-decision-making-with-sourcewarp.yml","en-us/blog/data-driven-decision-making-with-sourcewarp",{"_path":1830,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1831,"content":1837,"config":1842,"_id":1844,"_type":16,"title":1845,"_source":17,"_file":1846,"_stem":1847,"_extension":20},"/en-us/blog/automating-a-twitter-bot-using-gitlab-cicd",{"title":1832,"description":1833,"ogTitle":1832,"ogDescription":1833,"noIndex":6,"ogImage":1834,"ogUrl":1835,"ogSiteName":692,"ogType":693,"canonicalUrls":1835,"schema":1836},"How to automate a Twitter bot using GitLab CI/CD","This tutorial shows how to use the DevSecOps platform to create a set-and-forget Twitter bot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749661856/Blog/Hero%20Images/ci-cd-demo.jpg","https://about.gitlab.com/blog/automating-a-twitter-bot-using-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate a Twitter bot using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2023-03-21\",\n      }",{"title":1832,"description":1833,"authors":1838,"heroImage":1834,"date":1839,"body":1840,"category":14,"tags":1841},[1200],"2023-03-21","\n\nGitLab's CI/CD pipelines are great for automating many things, like deployments to Google Kubernetes Engine and security scans. But did you know that you could use GitLab CI/CD pipelines to run a set-and-forget Twitter bot?\n\nMany organizations today are leveraging Twitter's API to [understand customer sentiment](https://developer.twitter.com/en/blog/success-stories/target), [track public health data](https://developer.twitter.com/en/blog/success-stories/penn), [perform financial analysis](https://developer.twitter.com/en/blog/success-stories/likefolio), and more. While these bots may be running on self-managed infrastructure or external services, you can simplify and consolidate your tooling by leveraging GitLab instead, making your bot easier to manage.\n\nWith GitLab's [Free tier](/pricing/), you can leverage 400 minutes of CI/CD run time per month to automatically analyze and post tweets. 
With GitLab [Premium](/pricing/premium) and [Ultimate](/pricing/ultimate), you'll get even more pipeline minutes to tweet more, run longer natural language processing analyses, or for other projects.\n\nSetting up a Twitter bot using GitLab is pretty simple. At the end of this blog, you'll have a project that looks like [this](https://gitlab.com/smathur/twitter-bot), and a Twitter account that automatically posts a simple tweet.\n\nTo get started, you'll need these prerequisites:\n- GitLab account (self-hosted with GitLab Runner(s) set up or on GitLab.com)\n- Twitter API credentials\n\nOnce you've generated your Twitter API credentials, we can start building out our bot in GitLab. In this blog, we'll leverage GitLab's Web IDE based on Visual Studio Code, but feel free to use a code editor of your choice.\n\n## Step 1: Write a Python script to post tweets\n\n![Navigate to the Web IDE](https://about.gitlab.com/images/blogimages/2023-03-10-automating-a-twitter-bot-using-gitlab-cicd/web-ide.png){: .shadow}\n\nCreate a new blank project in GitLab, and click the \"Web IDE\" button to start writing some code. In the Web IDE, create a new file called `run_bot.py`, and paste the following code (this is where you interact with the Twitter API):\n\n```python\nimport tweepy\nimport config\n\ndef set_up():\n\tauth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret_key)\n\tauth.set_access_token(config.access_token, config.access_token_secret)\n\tapi = tweepy.API(auth)\n\treturn api\n\ndef run(tweet):\n\tapi = set_up()\n\tapi.update_status(tweet)\n\nrun('It\\'s Tanuki time')\n```\n\n**Note:** If you're familiar with Python, you'll notice that we're importing a file called `config` with some variables that we're using. This `config` file doesn't exist yet, but we'll create it from within a GitLab pipeline, leveraging CI/CD variables to securely store and use our Twitter API credentials.\n\nCreate another file called `requirements.txt`, and paste the following line:\n\n```\ntweepy\n```\n\nChanges to files in the Web IDE will be automatically saved, so switch to the Git tab and commit your changes.\n\n## Step 2: Create a CI/CD pipeline to run your Python script\n\nNext, we'll create a CI/CD pipeline script to run our Twitter bot and post a tweet every time the pipeline is run. To do this, you can:\n1. Create a new file using the Web IDE called `.gitlab-ci.yml`, or\n2. Head to your GitLab project, and from the sidebar, click CI/CD > Editor.\n\nIf you see some default text in the pipeline configuration, delete everything to start with a clean slate.\n\nIn the pipeline YAML file, we'll first specify the Docker image we want to run the bot on:\n\n```yaml\nimage: python:latest\n```\n\n**Note:** Normally in a pipeline, we would define stages first and then write jobs that are each assigned to a specific stage. Since we're only running one job in this pipeline, we don't need to specify stages at the top of our pipeline configuration file.\n\nNext, we'll add a job called `run` that runs the Python script we created in the previous step. 
Inside this job, we'll add a `script` section to run some commands that will execute our Python script.\n\n```yaml\nrun:\n  script:\n    - echo \"consumer_key = '$CONSUMER_KEY'\" >> config.py\n    - echo \"consumer_secret_key = '$CONSUMER_SECRET'\" >> config.py\n    - echo \"access_token = '$ACCESS_TOKEN'\" >> config.py\n    - echo \"access_token_secret = '$ACCESS_SECRET'\" >> config.py\n    - pip install -r requirements.txt\n    - python3 run_bot.py\n```\n\nCommit your changes. The pipeline will automatically run, since you just made a change to the project files, but it will fail. This is because we are calling some CI/CD variables in the pipeline, which we haven't set yet. Let's go ahead and do that!\n\n## Step 3: Set CI/CD variables to store API tokens\n\nHead to your GitLab project and from the sidebar, go to Settings > CI/CD.\n\nExpand the \"Variables\" section and add the `ACCESS_SECRET`, `ACCESS_TOKEN`, `CONSUMER_KEY`, and `CONSUMER_SECRET` variables as shown below (these are your Twitter API credentials):\n\n![CI/CD variables](https://about.gitlab.com/images/blogimages/2023-03-10-automating-a-twitter-bot-using-gitlab-cicd/ci-cd-variables.png){: .shadow}\n\nNote that the secrets are masked to prevent them from showing up in job logs (check the \"Mask variable\" box when creating/editing the variable).\n\n## Step 4: Test and schedule your Twitter bot\n\nNow that we've got everything set up, all we need to do is run the bot. Go to CI/CD > Pipelines, and click \"Run pipeline\". Click \"Run pipeline\" again, and wait for the `run` job to finish. If you've set up your Twitter credentials correctly, you should see that the pipeline successfully ran, and a tweet was posted on your bot account!\n\n![Schedule a pipeline](https://about.gitlab.com/images/blogimages/2023-03-10-automating-a-twitter-bot-using-gitlab-cicd/schedule-pipeline.png){: .shadow}\n\nOnce you've verified that your pipeline runs successfully, schedule your pipeline to automatically run at a regular interval. Go to CI/CD > Schedules, and click \"New schedule\". Feel free to use one of the default provided intervals, or use cron to set a custom schedule. Specify a timezone, and ensure that the \"Active\" checkbox is checked. Finally, click \"Save pipeline schedule\". You'll see that your pipeline has been scheduled to run, and when it will run next.\n\nAnd that's it! You now have a fully-functional Twitter bot running on GitLab, using CI/CD pipelines to automatically post tweets. While this demo Twitter bot simply posts a specified text message, you can add your own logic to [generate sentences using AI](https://linguatools.org/language-apis/sentence-generating-api/), [perform sentiment analysis on other users' tweets](https://www.analyticsvidhya.com/blog/2021/06/twitter-sentiment-analysis-a-nlp-use-case-for-beginners/), and more. 
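\n\nAs a small illustration of adding your own logic, you could replace the final `run(...)` call in `run_bot.py` from Step 1 with something that varies the message; this sketch only uses the Python standard library:\n\n```python\nimport datetime\nimport random\n\nmessages = [\n    \"It's Tanuki time\",\n    \"Happy {day} from the Tanuki bot!\",\n]\n\n# Fill in the current weekday and post one of the message templates.\nday = datetime.date.today().strftime(\"%A\")\nrun(random.choice(messages).format(day=day))\n```\n\n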
Running a Twitter bot is just one of the many ways you can leverage pipelines in GitLab, and you can also check out some other [interesting use cases](https://docs.gitlab.com/ee/ci/examples/).\n",[725,832,937,726],{"slug":1843,"featured":6,"template":678},"automating-a-twitter-bot-using-gitlab-cicd","content:en-us:blog:automating-a-twitter-bot-using-gitlab-cicd.yml","Automating A Twitter Bot Using Gitlab Cicd","en-us/blog/automating-a-twitter-bot-using-gitlab-cicd.yml","en-us/blog/automating-a-twitter-bot-using-gitlab-cicd",{"_path":1849,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1850,"content":1856,"config":1861,"_id":1863,"_type":16,"title":1864,"_source":17,"_file":1865,"_stem":1866,"_extension":20},"/en-us/blog/scaling-repository-maintenance",{"title":1851,"description":1852,"ogTitle":1851,"ogDescription":1852,"noIndex":6,"ogImage":1853,"ogUrl":1854,"ogSiteName":692,"ogType":693,"canonicalUrls":1854,"schema":1855},"Future-proofing Git repository maintenance","Learn how we revamped our architecture for faster iteration and more efficiently maintained repositories.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677736/Blog/Hero%20Images/Git.png","https://about.gitlab.com/blog/scaling-repository-maintenance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Future-proofing Git repository maintenance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patrick Steinhardt\"}],\n        \"datePublished\": \"2023-03-20\",\n      }",{"title":1851,"description":1852,"authors":1857,"heroImage":1853,"date":1858,"body":1859,"category":14,"tags":1860},[1282],"2023-03-20","\n\nUsers get the most from [Gitaly](/direction/gitaly/#gitaly-1), the service responsible for the storage and maintenance of all Git repositories in GitLab, when traffic hitting it is efficiently handled. Therefore, we must ensure our Git repositories remain in a well-optimized state. When it comes to Git monorepositories, this maintenance can be a complex task that can cause a lot of overhead by itself because repository housekeeping becomes more expensive the larger the repositories get. This blog post explains in depth what we have done over the past few GitLab releases to rework our approach to repository housekeeping for better scaling and to maintain an optimized state to deliver the best performance for our users.\n\n## The challenge with Git monorepository maintenance\n\nTo ensure that Git repositories remain performant, Git regularly runs a set of\nmaintenance tasks. On the client side, this usually happens by automatically\nrunning `git-gc(1)` periodically, which:\n\n- Compresses revisions into a `packed-refs` file.\n- Compresses objects into `packfiles`.\n- Prunes objects that aren't reachable by any of the revisions and that have\n  not been used for a while.\n- Generates and updates data structures like `commit-graphs` that help to speed\n  up queries against the Git repository.\n\nGit periodically runs `git gc --auto` automatically in the background, which\nanalyzes your repository and only performs maintenance tasks if required.\n\nAt GitLab, we can't use this infrastructure because it does not give us enough\ncontrol over which maintenance tasks are executed at what point in time.\nFurthermore, it does not give us full control over exactly which data\nstructures we opt in to. 
Instead, we have implemented our own maintenance\nstrategies that are specific to how GitLab works and catered to our specific\nneeds. Unfortunately, the way GitLab implemented repository maintenance has\nbeen limiting us for quite a while now.\n\n- It is unsuitable for large monorepositories.\n- It does not give us the ability to easily iterate on the employed maintenance\n  strategy.\n\nThis post explains our previous maintenance strategy and its problems as well as\nhow we revamped the architecture to allow us to iterate faster and more\nefficiently maintain repositories.\n\n## Our previous repository maintenance strategy\n\nIn the early days of GitLab, most of the application ran on a single server.\nOn this single server, GitLab directly accessed Git repositories. For various\nreasons, this architecture limited us, so we created the standalone Gitaly\nserver that provides a gRPC API to access Git repositories.\n\nTo migrate to exclusively accessing Git repository data using Gitaly we:\n\n- Migrated all the logic that was previously contained in the Rails\n   application to Gitaly.\n- Created Gitaly RPCs and updated Rails to not execute the logic directly, but\n   instead invoke the newly-implemented RPC.\n\nWhile this was the easiest way to tackle the huge task back then, the end\nresult was that there were still quite a few areas in the Rails codebase that\nrelied on knowing how the Git repositories were stored on disk.\n\nOne such area was repository maintenance. In an ideal world, the Rails server\nwould not need to know about the on-disk state of a Git repository. Instead,\nthe Rails server would only care about the data it wants to get out of the\nrepository or commit to it. Because of the Gitaly migration path we took,\nthe Rails application was still responsible for executing fine-grained\nrepository maintenance by calling certain RPCs:\n\n- `Cleanup` to delete stale, temporary files that have accumulated\n- `RepackIncremental` and `RepackFull` to either pack all loose objects into a\n  new packfile or alternatively to repack all packfiles into a single one\n- `PackRefs` to compress all references into a single `packed-refs` file\n- `WriteCommitGraph` to update the commit-graph\n- `GarbageCollect` to perform various different tasks\n\nThese low-level details of repository maintenance were being managed by the\nclient. But because clients didn't have any information on the on-disk state of\nthe repository, they could not even determine which of these maintenance tasks\nhad to be executed in the first place. Instead, we had a very simple heuristic:\nEvery few pushes, we ran one of the above RPCs to perform one of the maintenance\ntasks. While this heuristic worked, it wasn't great for the following reasons:\n\n- Repositories can be modified without using pushes at all. So if users only\n  use the Web IDE to commit to repositories, they may not get repacked at all.\n- Because repository maintenance is controlled by the client, Gitaly can't\n  assume a specific repository state.\n- The threshold for executing housekeeping tasks is set globally across all\n  projects rather than on a per-project basis. Consequently, no matter\n  whether you have a tiny repository or a huge monorepository, we would use the\n  same intervals for executing maintenance tasks. 
As you may imagine though,\n  doing a full repack of a Git repository that is only a few dozen megabytes in\n  size is a few orders of magnitude faster than repacking a monorepository\n  that is multiple gigabytes in size.\n- Specific types of Git repositories hosted by Gitaly need special care and we\n  required Gitaly clients to know about these.\n- Repository maintenance was inefficient overall. Clients did not know about the\n  on-disk state of repositories. Consequently, they had no choice except to\n  repeatedly ask Gitaly to optimize specific data structures without knowing\n  whether this was required in the first place.\n\n## Heuristical maintenance strategy\n\nIt was clear that we needed to change the strategy we used for repository\nmaintenance. Most importantly, we wanted to:\n\n- Make Gitaly the single source of truth for how we maintain repositories.\n  Clients should not need to worry about low-level specifics, and Gitaly should\n  be able to easily iterate on the strategy.\n- Make the default maintenance strategy work for repositories of all sizes.\n- Make the maintenance strategy work for repositories of all types. A client\n  should not need to worry about which maintenance tasks must be executed for\n  what repository type.\n- Avoid optimizing data structures that already are in an optimal state.\n- Improve visibility into the optimizations we perform.\n\nAs mentioned in the introduction, Git periodically runs `git gc --auto`. This\ncommand inspects the repository's state and performs optimizations only when it\nfinds that the repository is in a sufficiently bad state to warrant the cost.\nWhile using this command directly in the context of Gitaly does not give us\nenough flexibility, it did serve as the inspiration for our new architecture.\n\nInstead of providing fine-grained RPCs to maintain various parts of a Git\nrepository, we now only provide a single RPC `OptimizeRepository` that works as\na black box to the caller. This RPC call:\n\n1. Cleans up stale data in the repository if there is any.\n1. Analyzes the on-disk state of the repository.\n1. Depending on this on-disk state, performs only those maintenance tasks that\n   are deemed necessary.\n\nBecause we can analyze and use the on-disk state of the repository, we can be\nfar more intelligent about repository maintenance compared to the previous\nstrategy where we optimized some bits of the repository every few pushes.\n\n### Packing objects\n\nIn the old-style repository maintenance, the client would call either\n`RepackIncremental` or `RepackFull`. This would either pack all loose objects into a new `packfile`, or repack all objects into a single `packfile`.\n\nBy default, we would perform a full repack every five repacks. While this may be\na good default for small repositories, it gets prohibitively expensive for huge\nmonorepositories where a full repack may easily take several minutes.\n\nThe new heuristical maintenance strategy instead scales the allowed number of\n`packfiles` by the total size of all combined `packfiles`. As a result, the\nlarger the repository becomes, the less frequently we perform a full repack.\n\n### Pruning objects\n\nIn the past, clients would periodically call `GarbageCollect`. In addition to\nrepacking objects, this RPC would also prune any objects that are unreachable\nand that haven't been accessed for a specific grace period.\n\nThe new heuristical maintenance strategy scans through all loose objects that\nexist in the repository. 
If the number of loose objects that have a modification\ntime older than two weeks exceeds a certain threshold, it spawns the\n`git prune` command to prune these objects.\n\n### Packing references\n\nIn the past, clients would call `PackRefs` to repack references into the\n`packed-refs` file.\n\nBecause the time to compress references scales with the size of the\n`packed-refs` file, the new heuristical maintenance strategy takes into account\nboth the size of the `packed-refs` file and the number of loose references that\nexist in the repository. If a ratio between these two figures is exceeded, we\ncompress the loose references.\n\n### Auxiliary data structures\n\nThere are auxiliary data structures like `commit-graphs` that are used by Git\nto speed up various queries. With the new heuristical maintenance strategy,\nGitaly now automatically updates these as required, either when they are\ndeemed to be out of date or when they are missing altogether.\n\n### Heuristical maintenance strategy rollout\n\nWe rolled out this new heuristical maintenance strategy to GitLab.com in March 2022. Initially, we only rolled it out for\n[`gitlab-org/gitlab`](https://gitlab.com/gitlab-org/gitlab), which is a\nrepository where maintenance performed particularly poorly in the past. You can\nsee the impact of the rollout in the following graph:\n\n![Latency of OptimizeRepository for gitlab-org/gitlab](https://about.gitlab.com/images/blogimages/repo-housekeeping-gitlab-org-gitlab-latency.png)\n\nIn this graph, you can see that:\n\n1. Until March 19, we used the legacy fine-grained RPC calls. We spent most\n   of the time in `RepackFull`, followed by `RepackIncremental` and `GarbageCollect`.\n1. Because March 19 and 20 fell on a weekend, not much happened with\n   housekeeping.\n1. Early on March 21, we switched `gitlab-org/gitlab` to use heuristical\n   housekeeping using `OptimizeRepository`. Initially, there didn't seem to be\n   much of an improvement. There wasn't much difference in how much time we\n   spent maintaining this repository compared to the past.\n\n   However, this was caused by an inefficient heuristic. Instead of only pruning\n   objects when there were stale ones, we always pruned objects when we saw that\n   there were too many loose objects.\n1. We deployed a fix for this bug on March 22, which led to a significant drop in\n   time spent optimizing this repository compared to before.\n\nThis demonstrated two things:\n\n- We're easily able to iterate on the heuristics that we have in Gitaly.\n- Using the heuristics saves a lot of compute time as we don't unnecessarily\n  optimize anymore.\n\nWe have subsequently rolled this out to all of GitLab.com, starting on March\n29, 2022, with similar improvements. With this change, we more than halved the CPU\nload when performing repository optimizations.\n\n## Observability\n\nWhile it is great that `OptimizeRepository` has managed to save us a lot of\ncompute power, one goal was to improve visibility into repository housekeeping.\nMore specifically, we wanted to:\n\n- Gain visibility on the global level to see what optimizations are performed\n  across all of our repositories.\n- Gain visibility on the repository level to know what state a specific\n  repository is in.\n\nIn order to improve global visibility, we expose a set of Prometheus metrics that\nallow us to observe important details about our repository maintenance. 
The\nfollowing graphs show the optimizations performed in a 30-minute window of our\nproduction systems on GitLab.com.\n\n- The optimizations that are performed in general.\n\n  ![Repository optimization metrics for GitLab.com](https://about.gitlab.com/images/blogimages/repo-housekeeping-metrics-optimizations.png)\n\n- The average latency it takes to perform each of these optimizations.\n\n  ![Repository optimization metrics for GitLab.com](https://about.gitlab.com/images/blogimages/repo-housekeeping-metrics-latencies.png)\n\n- What kind of stale data we are cleaning up.\n\n  ![Repository optimization metrics for GitLab.com](https://about.gitlab.com/images/blogimages/repo-housekeeping-metrics-cleanups.png)\n\nTo improve visibility into the state each repository is in, we have started to\nlog structured data that includes all the relevant bits. A subset of the\ninformation it exposes is:\n\n- The number of loose objects and their sizes.\n- The number of `packfiles` and their combined size.\n- The number of loose references.\n- The size of the `packed-refs` file.\n- Information about `commit-graphs`, bitmaps and other auxiliary data\n  structures.\n\nThis information is also exposed through Prometheus metrics:\n\n![Repository state metrics for GitLab.com](https://about.gitlab.com/images/blogimages/repo-state-metrics.png)\n\nThese graphs expose important metrics of the on-disk state of our repositories:\n\n- The top panel shows which data structures exist.\n- The heatmaps on the left show how large specific data structures are.\n- The heatmaps on the right show how many of these data structures we have.\n\nCombining both the global and per-repository information allows us to easily\nobserve how repository maintenance behaves during normal operations. But more\nimportantly, it gives us meaningful data when rolling out new features that\nchange the way repositories are maintained.\n\n## Manually enabling heuristical housekeeping\n\nWhile the heuristical housekeeping is enabled by default starting with GitLab\n15.8, it was already introduced in GitLab 14.10. If you want to use the\nnew housekeeping strategy before upgrading to 15.8, you can opt in by\nsetting the `optimized_housekeeping` [feature flag](https://docs.gitlab.com/ee/administration/feature_flags.html#how-to-enable-and-disable-features-behind-flags).\nYou can do so via the `gitlab-rails` console:\n\n```ruby\nFeature.enable(:optimized_housekeeping)\n```\n\n## Future improvements\n\nWhile the new heuristical optimization strategy has been successfully\nbattle-tested for a while now on GitLab.com, at the time of writing this\nblog post, it still wasn't enabled by default for self-managed installations.\nThis has finally changed with GitLab 15.8, where we have default-enabled the new\nheuristical maintenance strategy.\n\nWe are not done yet, though. 
Now that Gitaly is the only source of truth for how\nrepositories are optimized, we are tracking improvements to our maintenance\nstrategy in [epic 7443](https://gitlab.com/groups/gitlab-org/-/epics/7443):\n\n- [Multi-pack indices](https://git-scm.com/docs/multi-pack-index) and geometric\n  repacking will help us to further reduce the time spent repacking objects.\n- [Cruft packs](https://git-scm.com/docs/cruft-packs) will help us to further\n  reduce the time spent pruning objects and reduce the overall size of\n  unreachable objects.\n- Gitaly will automatically run housekeeping tasks when receiving mutating RPC\n  calls so that clients don't have to call `OptimizeRepository` at all anymore.\n\nSo stay tuned!\n\n",[702,1286,704],{"slug":1862,"featured":6,"template":678},"scaling-repository-maintenance","content:en-us:blog:scaling-repository-maintenance.yml","Scaling Repository Maintenance","en-us/blog/scaling-repository-maintenance.yml","en-us/blog/scaling-repository-maintenance",{"_path":1868,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1869,"content":1874,"config":1880,"_id":1882,"_type":16,"title":1883,"_source":17,"_file":1884,"_stem":1885,"_extension":20},"/en-us/blog/how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub",{"title":1870,"description":1871,"ogTitle":1870,"ogDescription":1871,"noIndex":6,"ogImage":1498,"ogUrl":1872,"ogSiteName":692,"ogType":693,"canonicalUrls":1872,"schema":1873},"GitLab helps mitigate Docker Hub's open source image removal","CI/CD and Kubernetes deployments can be affected by Docker Hub tier changes. This tutorial walks through analysis, mitigations, and long-term solutions.","https://about.gitlab.com/blog/how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab can help mitigate deletion of open source container images on Docker Hub\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-03-16\",\n      }",{"title":1875,"description":1871,"authors":1876,"heroImage":1498,"date":1877,"body":1878,"category":14,"tags":1879},"How GitLab can help mitigate deletion of open source container images on Docker Hub",[1504],"2023-03-16","\nDocker, Inc. shared an email update to Docker Hub users that it will [sunset Free Team organizations](https://www.infoworld.com/article/3690890/docker-sunsets-free-team-subscriptions-roiling-open-source-projects.html). If accounts do not upgrade to a paid plan before April 14, 2023, their organization's images may be deleted after 30 days. This change can affect open source organizations that publish their images on Docker Hub, as well as consumers of these container images, used in CI/CD pipelines, Kubernetes cluster deployments, or docker-compose demo environments. This blog post discusses tools and features on the GitLab DevSecOps platform to help users analyze and mitigate the potential impact on production environments.\n\n_Update (March 20, 2023): Docker, Inc. [published an apology blog post](https://www.docker.com/blog/we-apologize-we-did-a-terrible-job-announcing-the-end-of-docker-free-teams/), including a FAQ, and clarifies that the company will not delete container images by themselves. Maintainers can migrate to a personal account, join the Docker-sponsored open source program, or opt into a paid plan. 
If open source container image maintainers do nothing, this leads to another issue: stale container images can become a security problem. The following blog post can help with security analysis and migration too._ \n\n_Update (March 27, 2023): On March 24, 2023, Docker, Inc. [published another blog post](https://www.docker.com/blog/no-longer-sunsetting-the-free-team-plan/) announcing the reversal of the decision to sunset the Free team plan and updated its [FAQ for Free Team organization](https://www.docker.com/developers/free-team-faq/). While this is a welcome development for the entire community, it is still crucial to ensure the reliability of your software development lifecycle by ensuring redundancies are in place for your container registries, as detailed in this blog post._\n\n### Inventory of used container images\n\nCI/CD pipelines in GitLab can execute jobs in containers. This is specified by the [`image` keyword](https://docs.gitlab.com/ee/ci/yaml/#image) in jobs, job templates, or as a global [`default`](https://docs.gitlab.com/ee/ci/yaml/#default) attribute. For the first iteration, you can clone a GitLab project locally and search for the `image` string in all CI/CD configuration files. The following example shows how to execute the `find` command on the command line interface (CLI), searching for files matching the name pattern `*ci.yml`, and looking for the `image` string in the file content. The command prints a list of search pattern matches and the corresponding file name to standard output. The example inspects the [project](https://gitlab.com/gitlab-com/www-gitlab-com) for the [GitLab handbook](https://about.gitlab.com/handbook/) and [website](https://about.gitlab.com/) to analyze whether its CI/CD deployment pipelines could be affected by the Docker Hub changes.\n\n```bash\n$ git clone https://gitlab.com/gitlab-com/www-gitlab-com && cd www-gitlab-com\n\n$ find . -type f -iname '*ci.yml' -exec sh -c \"grep 'image:' '{}' && echo {}\" \\;\n\n  image: registry.gitlab.com/gitlab-org/gitlab-build-images:www-gitlab-com-debian-${DEBIAN_VERSION}-ruby-3.0-node-16\n  image: alpine:edge\n  image: alpine:edge\n  image: debian:stable-slim\n  image: debian:stable-slim\n  image: registry.gitlab.com/gitlab-org/gitlab-build-images:danger\n./.gitlab-ci.yml\n```\n\nA [discussion on Hacker News](https://news.ycombinator.com/item?id=35168802) mentions that \"official Docker images\" are not affected, but this is not officially confirmed by Docker yet. [Official Docker images](https://hub.docker.com/u/library) do not use a namespace prefix (i.e., `namespace/imagename`); instead, they use plain image names such as `debian:\u003Ctagname>`. `registry.gitlab.com/gitlab-org/gitlab-build-images:danger` uses a full URL image string, which includes the image registry server domain (`registry.gitlab.com` in the example shown).\n\nIf there is no full URL prefix in the image string, this is an indicator that this image could be pulled from Docker Hub by default. There might be other infrastructure safety nets put in place, for example, a cloud provider registry that caches the Docker Hub images (Google Cloud, AWS, Azure, etc.).\n\n#### Advanced search for images\n\nYou can use the [project lint API endpoint](https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration) to fetch the CI configuration. The following script uses the [python-gitlab API library](https://python-gitlab.readthedocs.io/en/stable/gl_objects/ci_lint.html) to use this API endpoint:\n\n1. 
Collect all projects from either a single project ID, a group ID with projects, or from the instance.\n2. Run the `project.ci_lint.get()` method to get a merged yaml configuration for CI/CD from the current GitLab project.\n3. Parse the yaml content and print only the job names, and the image keys.\n\nThe [full script is located here](https://gitlab.com/gitlab-da/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_all_cicd_job_images.py), and is open source, licensed under MIT.\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\nimport yaml\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\nGITLAB_TOKEN = os.environ.get('GL_TOKEN') # token requires developer permissions\nPROJECT_ID = os.environ.get('GL_PROJECT_ID') #optional\n# https://gitlab.com/gitlab-da/use-cases/docker\nGROUP_ID = os.environ.get('GL_GROUP_ID', 65096153) #optional\n\n#################\n# Main\n\nif __name__ == \"__main__\":\n    if not GITLAB_TOKEN:\n        print(\"🤔 Please set the GL_TOKEN env variable.\")\n        sys.exit(1)\n\n    gl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n    # Collect all projects, or prefer projects from a group id, or a project id\n    projects = []\n\n    # Direct project ID\n    if PROJECT_ID:\n        projects.append(gl.projects.get(PROJECT_ID))\n\n    # Groups and projects inside\n    elif GROUP_ID:\n        group = gl.groups.get(GROUP_ID)\n\n        for project in group.projects.list(include_subgroups=True, all=True):\n            # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n            manageable_project = gl.projects.get(project.id)\n            projects.append(manageable_project)\n\n    # All projects on the instance (may take a while to process)\n    else:\n        projects = gl.projects.list(get_all=True)\n\n    print(\"# Summary of projects and their CI/CD image usage\")\n\n    # Loop over projects, fetch .gitlab-ci.yml, run the linter to get the full translated config, and extract the `image:` setting\n    for project in projects:\n\n        print(\"# Project: {name}, ID: {id}\\n\\n\".format(name=project.name_with_namespace, id=project.id))\n\n        # https://python-gitlab.readthedocs.io/en/stable/gl_objects/ci_lint.html\n        lint_result = project.ci_lint.get()\n\n        data = yaml.safe_load(lint_result.merged_yaml)\n\n        for d in data:\n            print(\"Job name: {n}\".format(n=d))\n            for attr in data[d]:\n                if 'image' in attr:\n                    print(\"Image: {i}\".format(i=data[d][attr]))\n\n        print(\"\\n\\n\")\n\nsys.exit(0)\n```\n\nThe [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_all_cicd_job_images.py) requires Python (tested with 3.11) and the python-gitlab and pyyaml modules. 
Example on macOS with Homebrew:\n\n```shell\n$ brew install python\n$ pip3 install python-gitlab pyyaml\n```\n\nYou can execute the script and set the different environment variables to control its behavior:\n\n```shell\n$ export GL_TOKEN=$GITLAB_TOKEN\n\n$ export GL_GROUP_ID=12345\n$ export GL_PROJECT_ID=98765\n\n$ python3 get_all_cicd_job_images.py\n\n# Summary of projects and their CI/CD image usage\n# Project: Developer Evangelism at GitLab  / use-cases / Docker Use cases  / Custom Container Image Python, ID: 44352983\n\nJob name: docker-build\nImage: docker:latest\n\n# Project: Developer Evangelism at GitLab  / use-cases / Docker Use cases  / Gitlab Dependency Proxy, ID: 44351128\n\nJob name: .test-python-version\nJob name: image-docker-hub\nImage: python:3.11\nJob name: image-docker-hub-dep-proxy\nImage: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/python:3.11\n```\n\nPlease verify the script and fork it for your own analysis and mitigation. The missing parts are checking the image URLs and doing a more sophisticated search. The code has been prepared to either check against a single project, a group with projects, or an instance (this may take a very long time, so use with care).\n\nYou can perform a more history-focused analysis by fetching the CI/CD job logs from GitLab and searching for the pulled container image to get an overview of past Docker executor runs – for example: `Using Docker executor with image python:3.11 ...`. The screenshot shows the CI/CD job logs UI search – you can automate the search using the GitLab API, and the [python-gitlab library](https://python-gitlab.readthedocs.io/en/stable/gl_objects/pipelines_and_jobs.html#jobs), for example.\n\n![GitLab CI/CD job logs, searching for the `image` keyword](https://about.gitlab.com/images/blogimages/docker-hub-oss-image-deletion-mitigation/cicd_gitlab_job_logs_search_image.png)\n\nThis snippet can be used in combination with the code shared for the CI lint API endpoint. It fetches the job trace logs and searches for the `image` keyword in the log. The missing parts are splitting the log line by line and extracting the image key information. This is left as an exercise for the reader.\n\n```python\n        for job in project.jobs.list():\n            log_trace = str(job.trace())\n\n            if 'image' in log_trace:\n                print(\"Job ID: {i}, URL {u}\".format(i=job.id, u=job.web_url))\n                print(log_trace)\n```\n\n### More inventory considerations\n\nSimilar to the API script for CI/CD navigating through all projects, you will need to analyze all Kubernetes manifest configuration files – using either a pull- or push-based approach. This can be achieved by using the [python-gitlab methods to load files from the repository](https://python-gitlab.readthedocs.io/en/stable/gl_objects/projects.html#project-files) and searching the content in similar ways. Helm charts use container images, too, and will require additional analysis.\n\nAn additional search possibility: Custom-built container images that use Docker Hub images as a source. A project will consist of:\n\n1. `Dockerfile` file that uses `FROM \u003Cimagename>`\n2. `.gitlab-ci.yml` configuration file that builds container images (using Docker-in-Docker, Kaniko, etc.)\n\nAn alternative search method for customers is available by using the [Advanced Search](https://docs.gitlab.com/ee/user/search/advanced_search.html) through the GitLab UI and API. 
The following example uses the [scope: blobs](https://docs.gitlab.com/ee/api/search.html#scope-blobs-premium-2) to search for the `FROM` string:\n\n```shell\n$ export GITLAB_TOKEN=xxxxxxxxx\n\n# Search in https://gitlab.com/gitlab-da/use-cases/docker/custom-container-image-python\n\n$ curl --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" \"https://gitlab.com/api/v4/projects/44352983/search?scope=blobs&search=FROM%20filename:Dockerfile*\"\n```\n\n![Command line output from Advanced Search API, scope blobs, search `FROM` in `Dockerfile*` file names.](https://about.gitlab.com/images/blogimages/docker-hub-oss-image-deletion-mitigation/cli_gitlab_advanced_search_api_dockerfile_from.png)\n\n## Mitigations and solutions\n\nThe following sections discuss potential mitigation strategies and long-term solutions.\n\n### Mitigation: GitLab dependency proxy\n\nThe dependency proxy provides a caching mechanism for Docker Hub images. It helps reduce the bandwidth and time required to download and pull the images. It also helped [mitigate the Docker Hub pull rate limits introduced in 2020](/blog/minor-breaking-change-dependency-proxy/). The dependency proxy can be configured for public and private projects.\n\nThe [dependency proxy](https://docs.gitlab.com/ee/user/packages/dependency_proxy/) needs to be enabled for a group. It also needs to be enabled by an instance administrator for self-managed environments, if turned off.\n\nThe following example creates two jobs: `image-docker-hub` and `image-docker-hub-dep-proxy`. The dependency proxy job uses the `CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX` CI/CD variable to instruct GitLab to store the image in the cache and to pull it from Docker Hub only when it is not yet available.\n\n```yaml\n.test-python-version:\n  script:\n    - echo \"Testing Python version:\"\n    - python --version\n\nimage-docker-hub:\n  extends: .test-python-version\n  image: python:3.11\n\nimage-docker-hub-dep-proxy:\n  extends: .test-python-version\n  image: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/python:3.11\n```\n\nThe configuration is available in [this project](https://gitlab.com/gitlab-de/use-cases/docker/gitlab-dependency-proxy).\n\nThe stored container image is visible at the group level in the `Package and container registries > Dependency Proxy` menu.\n\n### Mitigation: Container registry mirror\n\n[This blog post](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/) describes how to run a local container registry mirror. Skopeo from Red Hat is another alternative for syncing container image registries; a practical example is described [in this article](https://marcbrandner.com/blog/transporting-container-images-with-skopeo/).\n\nThe GitLab Cloud Native installation ([Helm charts](https://docs.gitlab.com/charts/) and [Operator](https://docs.gitlab.com/operator/)) uses a [mirror of tagged images](https://gitlab.com/gitlab-org/cloud-native/mirror/images) consumed by the related projects. Other product stages follow a similar approach; the [security scanners are shipped in container images](https://docs.gitlab.com/ee/user/application_security/offline_deployments/#container-registries-and-package-repositories) maintained by GitLab. This also enables self-managed airgapped installations.\n\n### Mitigation: Custom images in GitLab container registry\n\nReproducible builds and compliance requirements may have required you to create custom container images for CI/CD and Kubernetes already. This is also key to verify that no untested and untrusted images are being used in production. 
GitLab provides a fully integrated [container registry](https://docs.gitlab.com/ee/user/packages/container_registry/), which can be used natively within CI/CD pipelines and [GitOps workflows with the agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html).\n\nThe following `Dockerfile` example extends an existing image layer and installs additional tools using the Debian Apt package manager.\n\n```dockerfile\nFROM python:3.11-bullseye\n\nENV DEBIAN_FRONTEND noninteractive\n\nRUN apt update && apt -y install git curl jq && rm -rf /var/lib/apt/lists/*\n```\n\nYou can [use Docker to build container images](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html), and alternative options are Kaniko or Podman. On GitLab.com SaaS, you can use the Docker CI/CD template to build and push images. The following example modifies the `docker-build` job to only build the latest tag from the default branch:\n\n```yaml\ninclude:\n  - template: Docker.gitlab-ci.yml\n\ndocker-build:\n  stage: build\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH || $CI_COMMIT_TAG'\n      #when: manual\n      #allow_failure: true\n```\n\nFor this example, we specifically want to provide a Git tag that gets used for the container image tag as well.\n\n```shell\n$ git tag 3-11-bullseye\n$ git push --tags\n```\n\nThe image will be available at the GitLab container registry URL and the project namespace path. This path needs to be replaced in all projects that use a Python-based image. You can [create scripts for the GitLab API](/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation/) to update files and create MRs automatically:\n\n```yaml\nimage: registry.gitlab.com/gitlab-da/use-cases/docker/custom-container-image-python:3-11-bullseye\n```\n\n_Note: This is a demo project and not actively maintained. Please fork/copy it for your own needs._\n\n## Observability and security\n\nThe [number of failed CI/CD pipelines](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html) can be a good service level indicator (SLI) to verify whether the environment is affected by the Docker Hub changes. The same SLI applies to CI/CD jobs that build container images from a `Dockerfile` that is based on Docker Hub images (`FROM \u003Cimagename>`).\n\nA similar SLI applies to Kubernetes cluster deployments – if they continue to generate failures in GitOps pull or CI/CD push scenarios, additional analysis and actions are required. The pod status `ErrImagePull` and [`ImagePullBackOff`](https://kubernetes.io/docs/concepts/containers/images/#imagepullbackoff) will immediately show the problems. 
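\n\nA quick way to spot affected workloads is to filter the pod list for these statuses. A minimal sketch, assuming `kubectl` access to the cluster:\n\n```shell\n# List all pods whose containers currently fail to pull their image\nkubectl get pods --all-namespaces | grep -E 'ErrImagePull|ImagePullBackOff'\n```\n\n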
\n\nThe [image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) should also be revised – `Always` will immediately cause a problem, while `IfNotPresent` will use the local image cache.\n\n[This alert rule example](https://awesome-prometheus-alerts.grep.to/rules.html#rule-kubernetes-1-18) for Prometheus observing a Kubernetes cluster can help detect pods that are not healthy.\n\n```yaml\n  - alert: KubernetesPodNotHealthy\n    expr: sum by (namespace, pod) (kube_pod_status_phase{phase=~\"Pending|Unknown|Failed\"}) > 0\n    for: 15m\n    labels:\n      severity: critical\n    annotations:\n      summary: Kubernetes Pod not healthy (instance {{ $labels.instance }})\n      description: \"Pod has been in a non-ready state for longer than 15 minutes.\\n  VALUE = {{ $value }}\\n  LABELS = {{ $labels }}\"\n```\n\nCI/CD pipeline linters and Git hooks can also help enforce a GitLab registry URL prefix in all `image` tags when CI/CD configuration updates are pushed in merge requests.\n\nKubernetes deployment images can be controlled through additional integrations with the [Open Policy Agent Gatekeeper](https://www.openpolicyagent.org/docs/latest/kubernetes-introduction/) or [Kyverno](https://kyverno.io/policies/best-practices/restrict_image_registries/restrict_image_registries/). Kyverno also allows you to [mutate the image registry location](https://kyverno.io/policies/other/replace_image_registry/replace_image_registry/) and redirect pod images to trusted sources.\n\n[Operational container scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) in Kubernetes clusters and [container scanning in CI/CD pipelines](https://docs.gitlab.com/ee/user/application_security/container_scanning/) are recommended. This helps verify that images do not contain known security vulnerabilities.\n\n## Long-term solutions\n\nAs a long-term solution, analyze the affected Docker Hub organizations' images and match them against your image usage inventory. Some organizations have raised their concerns in [this Docker Hub feedback issue](https://github.com/docker/hub-feedback/issues/2314). Be sure to identify critical production CI/CD workflows and replace all external dependencies with locally maintained images.\n\nFork or copy project Dockerfile files from the upstream Git repositories, and use them as the single source of truth for custom container builds. This will also require training and documentation for DevSecOps teams, for example, optimizing container images for [efficient CI/CD pipelines](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html). More DevSecOps efficiency tips can be found in my Chemnitz Linux Days talk about \"Efficient DevSecOps Pipelines in a Cloud Native World\" ([slides](https://go.gitlab.com/RPog2h)).\n\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vT3jcfpddKL2jq7leX01QX6S4Y8vfLLBZMz4L1ZHMLY3xzB4IGOOIExODLEzH8YQM1atCNPm07Bw9m_/embed?start=false&loop=true&delayms=3000\" frameborder=\"0\" width=\"960\" height=\"569\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\nPlease share your ideas and thoughts about Docker Hub change mitigations and tools on the [GitLab community forum](https://forum.gitlab.com/).\n
Thank you!\n\nCover image by [Roger Hoyles](https://unsplash.com/photos/sTOQyRD8m74) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[832,1002,703],{"slug":1881,"featured":6,"template":678},"how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub","content:en-us:blog:how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub.yml","How Gitlab Can Help Mitigate Deletion Open Source Images Docker Hub","en-us/blog/how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub.yml","en-us/blog/how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub",{"_path":1887,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1888,"content":1894,"config":1901,"_id":1903,"_type":16,"title":1904,"_source":17,"_file":1905,"_stem":1906,"_extension":20},"/en-us/blog/getting-started-with-gitlab-application-security",{"title":1889,"description":1890,"ogTitle":1889,"ogDescription":1890,"noIndex":6,"ogImage":1891,"ogUrl":1892,"ogSiteName":692,"ogType":693,"canonicalUrls":1892,"schema":1893},"Getting started with GitLab application security","This tutorial shows how to incorporate GitLab security scan templates into a .gitlab-ci.yml file and view scan results.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663993/Blog/Hero%20Images/2018-developer-report-cover.jpg","https://about.gitlab.com/blog/getting-started-with-gitlab-application-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab application security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Hernandez\"},{\"@type\":\"Person\",\"name\":\"Julie Byrne\"}],\n        \"datePublished\": \"2023-03-15\",\n      }",{"title":1889,"description":1890,"authors":1895,"heroImage":1891,"date":1898,"body":1899,"category":14,"tags":1900},[1896,1897],"Victor Hernandez","Julie Byrne","2023-03-15","\nAs software security becomes increasingly important, many companies want to introduce standard code scanning processes into development workflows to find and remediate security vulnerabilities before they get to production. GitLab's DevSecOps Platform allows users to perform security scans in CI/CD pipelines, which can easily be enabled to check applications for security vulnerabilities such as unauthorized access, data leaks, and denial of service (DoS) attacks. While most of what is covered in this blog will pertain to Ultimate features, there are some features available for free and Premium tier users as well. 
By the end of this blog, you will have a solid starting point for adopting GitLab security scans, with any tier license, and understand the steps to take next to mature your DevSecOps practices.\n\n## Prerequisites\nTo enable security scanning for a project, you must have the following:\n- a GitLab project that meets the requirements of the security scan you choose to enable, with CI enabled\n- a `.gitlab-ci.yml` file for the project that has at least a build job defined\n- a Linux-based GitLab Runner with the Docker or Kubernetes executor\n\n## Get started: Add a scan template to your pipeline\n\nHere are the first steps to introduce security scanning.\n\n### Available security scans\n\nGitLab provides a variety of security scanners, each with its own set of criteria for adoption:\n\n| Scan type | Minimum tier | Prerequisites | Application requirements |\n| --- | --- | --- | --- |\n| [Static application security testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) | Free | None | See [SAST requirements](https://docs.gitlab.com/ee/user/application_security/sast/index.html#requirements) |\n| [Secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) | Free | None | None |\n| [Container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/) | Free | Container image built and pushed to registry | [Docker 18.09.03 or higher installed on the same computer as the runner](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#requirements); image uses a [supported distribution](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#supported-distributions) |\n| [Infrastructure as code (IaC) scanning](https://docs.gitlab.com/ee/user/application_security/iac_scanning/) |  Free | None | See [supported languages and frameworks](https://docs.gitlab.com/ee/user/application_security/iac_scanning/#supported-languages-and-frameworks) |\n| [Dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) - includes license compliance | Ultimate | None | Application must use one of the [supported languages and package managers](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/index.html#supported-languages-and-package-managers) |\n| [Dynamic application security testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/) | Ultimate | [Deployed target application](https://docs.gitlab.com/ee/user/application_security/dast/index.html#prerequisites) | See [GitLab DAST scanning options](https://docs.gitlab.com/ee/user/application_security/dast/index.html#gitlab-dast) |\n| [Coverage-guided fuzz testing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) | Ultimate | Instrumented version of application | See [supported fuzzing engines and languages](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/index.html#supported-fuzzing-engines-and-languages) |\n| [Web API fuzz testing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/) |  Ultimate | Deployed target application | See [supported API types](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/#enable-web-api-fuzzing) |\n\nMany customers will start with secret detection, dependency scanning, or SAST scanning, as they have the fewest requirements for usage.\n\n### Add the scanner template\n\nGitLab provides a [CI template for each security 
scan](https://docs.gitlab.com/ee/user/application_security/#security-scanning-without-auto-devops) that can be added to your existing `.gitlab-ci.yml` file. This can be done by manually editing the CI file and adding the appropriate template path in the templates section of the file. Several scanners can also be [enabled via the UI](https://docs.gitlab.com/ee/user/application_security/sast/#configure-sast-in-the-ui), where a merge request will be created to add the appropriate scanner to the `.gitlab-ci.yml` file. \n\nI will use a simple Spring Boot application as an example and enable dependency scanning, a scanner that is popular amongst our customers, as my first security scan. Dependency scanning will find vulnerabilities in the libraries I am using to build my application. My project is a Java application built via Maven and includes a `pom.xml` file, so it meets the requirements for dependency scanning. Since dependency scanning can be enabled via the UI, I'm going to take advantage of that feature here. \n\nFor this project, I have created a `.gitlab-ci.yml` file that contains build and test stages and a build job. I'm using the Auto DevOps auto-build job, but you can define your own build job if desired. This is the starting pipeline code in my `.gitlab-ci.yml` file:\n\n```\nimage: alpine:latest\n\ninclude:\n  - template: Jobs/Build.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab-foss/blob/master/lib/gitlab/ci/templates/Jobs/Build.gitlab-ci.yml\n\nstages:\n- build\n- test\n\n```\n\nTo enable dependency scanning, I'll first navigate to the **Security & Compliance** menu, **Configuration** sub-menu.\n\n![Security & Compliance configuration page](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/security_config.png){: .shadow}\n\nThe option to enable dependency scanning is available about halfway down the page. When I click `Configure with a merge request`, a branch is created and I am prompted to create a corresponding draft merge request.\n
I'll click `Create Merge Request` to save the merge request.\n\nOnce the merge request has been created, I see that a new branch `set-dependency-scanning-config-1` has been created and the `.gitlab-ci.yml` file has been updated with this code:\n\n```\n# You can override the included template(s) by including variable overrides\n# SAST customization: https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings\n# Secret Detection customization: https://docs.gitlab.com/ee/user/application_security/secret_detection/#customizing-settings\n# Dependency Scanning customization: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings\n# Container Scanning customization: https://docs.gitlab.com/ee/user/application_security/container_scanning/#customizing-the-container-scanning-settings\n# Note that environment variables can be set in several places\n# See https://docs.gitlab.com/ee/ci/variables/#cicd-variable-precedence\nimage: alpine:latest\ninclude:\n- template: Jobs/Build.gitlab-ci.yml\n- template: Security/Dependency-Scanning.gitlab-ci.yml\nstages:\n- build\n- test\n\n```\n\nThe change kicks off a pipeline, which will now include the dependency scan.\n\n![Pipeline including the dependency scanning job](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/dependency_job.png){: .shadow}\n\n## View results of the security scan\n\nFor all license tiers, you can view the results of any security scan jobs in the corresponding JSON report, which can be downloaded from the merge request.\n\n![Merge request artifacts download menu](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/mr_artifacts.png){: .shadow}\n\nWith GitLab Ultimate, you will also see the vulnerabilities found by the scan in the merge request widget.\n\n![Merge request security widget](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/mr_widget.png){: .shadow}\n\nAt this point, the `.gitlab-ci.yml` changes that enable security scanning are only available in the `set-dependency-scanning-config-1` branch. I will merge them to `main` so that the changes will be included in all future feature branches.\n\nWith GitLab Ultimate, merging to `main` will also provide the baseline **Vulnerability Report** for our application.  \n\n![Vulnerability Report](https://about.gitlab.com/images/blogimages/2023-02-26-getting-started-with-gitlab-application-security/vuln_report.png){: .shadow}\n\nNow, scan results presented in the merge request widget for any new merge requests will only show vulnerabilities introduced by those new code changes in the corresponding feature branch, and not the baseline of vulnerabilities that already exist on `main`.\n\n## Scan enforcement\n\nOnce you have enabled your first scans in your CI/CD pipelines, you might wonder how to enforce security scans, or how to require review and approval when critical vulnerabilities are found in new code changes. I recommend reviewing these resources, which cover these topics:
\n - For Ultimate customers: [How to ensure separation of duties and enforce compliance with GitLab](/blog/ensuring-compliance/)\n - For Premium customers: [How to action security vulnerabilities in GitLab Premium](https://about.gitlab.com/blog/actioning-security-vulnerabilities-in-gitlab-premium/)\n\nNow that you've gained comfort with security scanners as part of the GitLab CI/CD pipeline, check out our [Getting Started with GitLab Application Security](https://docs.gitlab.com/ee/user/application_security/get-started-security.html) documentation for recommended next steps.\n\n## More resources\n - [How GitLab's application security dashboard helps AppSec engineers](/blog/secure-stage-for-appsec/)\n - [Running security scans in limited connectivity and offline environments](/blog/offline-environments/)\n - [GitLab's newest continuous compliance features bolster software supply chain security](/blog/gitlabs-newest-continuous-compliance-features-bolster-software/)\n",[725,832,937,1307],{"slug":1902,"featured":6,"template":678},"getting-started-with-gitlab-application-security","content:en-us:blog:getting-started-with-gitlab-application-security.yml","Getting Started With Gitlab Application Security","en-us/blog/getting-started-with-gitlab-application-security.yml","en-us/blog/getting-started-with-gitlab-application-security",{"_path":1908,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1909,"content":1915,"config":1920,"_id":1922,"_type":16,"title":1923,"_source":17,"_file":1924,"_stem":1925,"_extension":20},"/en-us/blog/expanding-guest-capabilities-in-gitlab-ultimate",{"title":1910,"description":1911,"ogTitle":1910,"ogDescription":1911,"noIndex":6,"ogImage":1912,"ogUrl":1913,"ogSiteName":692,"ogType":693,"canonicalUrls":1913,"schema":1914},"The feature you wanted - Expanded Guest capabilities in GitLab Ultimate","GitLab Ultimate customers can now provide Guests the ability to view code. Learn how to access this new capability.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682652/Blog/Hero%20Images/iterating-cover.jpg","https://about.gitlab.com/blog/expanding-guest-capabilities-in-gitlab-ultimate","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The feature you wanted - Expanded Guest capabilities in GitLab Ultimate\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hannah Sutor\"}],\n        \"datePublished\": \"2023-03-08\",\n      }",{"title":1910,"description":1911,"authors":1916,"heroImage":1912,"date":1917,"body":1918,"category":14,"tags":1919},[1303],"2023-03-08","\n\n[Customizable roles](https://docs.gitlab.com/ee/user/permissions.html) have been on GitLab's roadmap for the past two years. When we began working on them a year ago, our team struggled to find the [minimal viable change](https://about.gitlab.com/handbook/product/product-principles/#the-minimal-viable-change-mvc) (MVC) that would benefit customers. At the same time, through different feedback channels, customers were telling us they wanted more from their Ultimate tier Guest user roles. There it was: our MVC!\n\nHere is what happened next.\n\n## Our MVC journey\n\nWhen we began working on customizable roles, we knew that the six static, out-of-the-box roles that come with GitLab were not flexible enough to cover the use cases of our customers. Some roles were too permissive, while others didn’t grant the permissions necessary to accomplish a task. 
At a time when security and [the principle of least privilege](https://www.techtarget.com/searchsecurity/definition/principle-of-least-privilege-POLP) are more top of mind than ever, we needed to give our customers a way to define their own roles.\n\nThe customer ask was clear, but the implementation path was not. Performance was a major consideration: Permission policies are evaluated whenever a user performs an action, and we needed a secure but scalable way for thousands of users, potentially with hundreds of custom roles defined, to do their work in GitLab. The team did a lot of technical discovery and performance testing to ensure the chosen technical implementation was scalable.\n\nWe decided to start with a very small implementation of custom roles - something that would be meaningful to customers, while also allowing our team to test the new backend implementation that will support custom role creation and usage.\n\n## How custom roles work\n\nFor our MVC, we decided that GitLab.com customers with an Ultimate license should be able to create a custom role that is based on the current “Guest” role. They will be able to add one additional permission to the “Guest” role - the ability to view code. This effectively creates a “Guest+1” role. They can then assign this custom role to any existing user. \n\nPreviously, Guests were able to view code on Self-Managed GitLab, and only on internal or public projects. Now, this functionality is available to Guests across the board - in GitLab.com and Self-Managed GitLab, and regardless of project visibility setting. You just need to create and apply the custom Guest role to any user who wishes to view code.\n\nYou can read more about how to [implement this yourself](https://docs.gitlab.com/ee/user/permissions.html#custom-roles) and watch a demo [here](https://about.gitlab.com/releases/2023/02/22/gitlab-15-9-released/#users-with-the-guest-role-can-view-private-repositories).\n\n## Create a custom role\n\nUse the API to create the “Guest+1” custom role. This role will show up as \"Guest - custom\" in the UI, so that it's easy to see which users have this version of the \"Guest\" role assigned.\n\nOnce the custom role is created, you can [use the API](https://docs.gitlab.com/ee/user/permissions.html#custom-roles) to associate it with a list of users. Voila! Now, your users have a custom role that allows them to view code as a Guest.\n\n![customizable guest role](https://about.gitlab.com/images/blogimages/iterating-towards-customizable-roles/guest-custom-role.png){: .shadow}
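\n\nAs a rough sketch of the flow (check the custom roles documentation linked above for the exact attributes supported in your GitLab version), creating the custom role via the API could look like this, where `\u003Cgroup-id>` is a placeholder for your group and `10` is the Guest base access level:\n\n```shell\n# Create a \"Guest + view code\" custom role for the group (Guest access level = 10)\n$ curl --request POST --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" --header \"Content-Type: application/json\" --data '{\"base_access_level\": 10, \"read_code\": true}' \"https://gitlab.com/api/v4/groups/\u003Cgroup-id>/member_roles\"\n```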
\n\n## Why this MVC?\n\nSometimes, something is so loud that you’re forced to listen to it. That’s undoubtedly how I felt when I heard the dissatisfaction of our Ultimate customers around Guest users in private projects.\n\nAn unlimited number of Guest users are free with a GitLab Ultimate subscription. However, if the Guest user doesn’t have enough access to really do much within the product, is it really of any value at all? Customers left us a lot of feedback that the low level of privilege Guest users have for private projects was detrimental to their users' workflows - making those “free” users not actually useful at all. We knew it was time to deliver more value.\n\n## What’s next\n\nOur final vision for customizable roles in GitLab is for our users to be able to take what exists today in our [permissions table](https://docs.gitlab.com/ee/user/permissions.html) and toggle each permission off/on as they please to define a custom role.\n\nWe plan to start on this by [consolidating](https://gitlab.com/groups/gitlab-org/-/epics/8914) some of these permissions - both for practical and performance reasons. As you can imagine, some permissions don’t make sense to be toggled “on” if a different feature is “off.\" We will be removing the need for complex logic by consolidating permissions into larger sets that make sense to be enabled/disabled at the same time. This should also translate nicely on the usability side - permutations of 100+ individual permissions would be unwieldy for a systems administrator to manage, and difficult for an end user to understand.\n\nThis update to custom roles is a great example of our iteration value here at GitLab, and I’m most excited about the fact that it’s solving an acute pain point for our Ultimate customers. They deserve to get a lot of value out of their Ultimate subscription, and I am hopeful that an additional permission for Guest users is one way we can increase that value. It’s also a great first step towards our grand customizable roles vision. I hope you’ll give it a try!\n\n**Check out this demo that shows the customizable guest role in action:**\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/46cp_-Rtxps\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[749,1347,480],{"slug":1921,"featured":6,"template":678},"expanding-guest-capabilities-in-gitlab-ultimate","content:en-us:blog:expanding-guest-capabilities-in-gitlab-ultimate.yml","Expanding Guest Capabilities In Gitlab Ultimate","en-us/blog/expanding-guest-capabilities-in-gitlab-ultimate.yml","en-us/blog/expanding-guest-capabilities-in-gitlab-ultimate",{"_path":1927,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1928,"content":1934,"config":1940,"_id":1942,"_type":16,"title":1943,"_source":17,"_file":1944,"_stem":1945,"_extension":20},"/en-us/blog/how-to-deploy-react-to-amazon-s3",{"title":1929,"description":1930,"ogTitle":1929,"ogDescription":1930,"noIndex":6,"ogImage":1931,"ogUrl":1932,"ogSiteName":692,"ogType":693,"canonicalUrls":1932,"schema":1933},"How to deploy a React application to Amazon S3 using GitLab CI/CD","Follow this guide to use OpenID Connect to connect to AWS and deploy a React application to Amazon S3.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663291/Blog/Hero%20Images/cover1.jpg","https://about.gitlab.com/blog/how-to-deploy-react-to-amazon-s3","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy a React application to Amazon S3 using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeremy Wagner\"}],\n        \"datePublished\": \"2023-03-01\",\n      }",{"title":1929,"description":1930,"authors":1935,"heroImage":1931,"date":1937,"body":1938,"category":14,"tags":1939},[1936],"Jeremy Wagner","2023-03-01","\n\nAmazon S3 has a Static Website Hosting feature which allows you to host a static website directly from an S3 bucket. When you \nhost your website on S3, your website content is stored in the S3 bucket and served directly to your users, without the need \nfor additional resources.\n
Combine this with Amazon CloudFront and you will have a cost-effective and scalable solution for \nhosting static websites – making it a popular choice for single-page applications.\n\nIn this post, I will walk you through setting up your Amazon S3 bucket, setting up OpenID Connect ([OIDC](https://openid.net/connect/)) in AWS, and deploying your application \nto your Amazon S3 bucket using a GitLab [CI/CD](/topics/ci-cd/) pipeline.\n\nBy the end of this post, you will have a [CI/CD pipeline](/blog/how-to-keep-up-with-ci-cd-best-practices/) built in GitLab that automatically deploys to your Amazon S3 bucket. Let's dive in.\n\n## Prerequisites\n\nFor this guide you will need the following:\n\n- [Node.js](https://nodejs.org/en/) >= 14.0.0 and npm >= 5.6 installed on your system\n- [Git](https://git-scm.com/) installed on your system\n- A [GitLab](https://gitlab.com/-/trial_registrations/new) account\n- An [AWS](https://aws.amazon.com/free/) account\n\n[A previous tutorial](/blog/how-to-automate-testing-for-a-react-application-with-gitlab/) demonstrated how to create a new React \napplication, run unit tests as part of the CI process in GitLab, and output the test results and code coverage into the pipeline. This post continues where that project left off, so to follow along you can fork [this project](https://gitlab.com/guided-explorations/engineering-tutorials/react-unit-testing) or complete the guide in the linked post.\n\n## Configure your Amazon S3 bucket\n\nYou'll need to configure your Amazon S3 bucket so let's do that first.\n\n### Create your bucket\n\nAfter you log in to your AWS account, search for S3 using the search bar and select the S3 service. This will open the S3 service home page.\n\nRight away, you should see the option to create a bucket. The bucket is where you are going to store your built React application. Click the **Create bucket** button to continue.\n\n![Create S3 bucket](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/create_bucket.png){: .shadow}\n\nGive your bucket a name, select your region, leave the rest of the settings as default (we’ll come back to these later), and continue by \nclicking the **Create bucket** button. When naming your bucket, it’s important to remember that your bucket name must be unique and follow the \nbucket naming rules. I named mine `jw-gl-react`.\n\nAfter creating your bucket, you should be taken to a list of your buckets as shown below.\n\n![S3 bucket list](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/bucket_list.png){: .shadow}\n\n### Configure static website hosting\n\nThe next step is to configure static website hosting. Open your S3 bucket by clicking into the bucket name. Select the **Properties** tab and \nscroll to the bottom to find the static website hosting option.\n\n![static hosting button](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/static_hosting_1.png){: .shadow}\n\nClick **Edit** and then enable static website hosting. For the **Index** and **Error** document, enter `index.html` and then click **Save changes**.\n\n![edit static hosting](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/static_hosting_2.png){: .shadow}\n\n### Set up permissions\n\nNow that you have enabled static website hosting, you need to update your permissions so the public can visit your website. 
Return to your bucket and select the **Permissions** tab.\n\nUnder **Block public access (bucket settings)**, click **Edit**, uncheck **Block all public access**, and continue to **Save changes**.\n\n![block public access](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/block_access_1.png){: .shadow}\n\nYour page should now look like this:\n\n![saved blocked public access](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/block_access_2.png){: .shadow}\n\nNow, you need to edit the Bucket Policy. Click the **Edit** button in the **Bucket Policy** section. Paste the following code into your new policy:\n\n```javascript\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Sid\": \"PublicReadGetObject\",\n            \"Effect\": \"Allow\",\n            \"Principal\": \"*\",\n            \"Action\": \"s3:GetObject\",\n            \"Resource\": \"arn:aws:s3:::jw-gl-react/*\"\n        }\n    ]\n}\n```\n\nReplace `jw-gl-react` in the `Resource` property with the name of your bucket and **Save changes**.\n\nYour bucket should now look like this:\n\n![publicly accessible bucket](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/block_access_3.png){: .shadow}\n\n## Manually upload your React application\n\nNow, let’s build your React application and manually publish it to your S3 bucket. \n\nTo build the application, make sure your project is cloned to your local machine and run the following command in your terminal inside of your \nrepository directory:\n\n```\nnpm run build\n```\n\nThis will create a build folder inside of your repository directory.\n\nInside of your bucket, click the **Upload** button.\n\n![manual bucket upload](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/upload_1.png){: .shadow}\n\nDrag the contents of your newly created build folder (not the folder itself) into the upload area. This will \nupload the contents of your application into your S3 bucket. Make sure to click **Upload** at the bottom of the page to start the upload.\n\nNow return to your bucket **Properties** tab and scroll to the bottom to find the URL of your static website.\n\n![static website url](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/upload_2.png){: .shadow}\n\nClick the link and you should see your built React application open in your browser.\n\n![deployed app](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/manual_deploy.png){: .shadow}\n\n## Set up OpenID Connect in AWS\n\nTo deploy to your S3 bucket from GitLab, we’re going to use a GitLab CI/CD job to receive temporary credentials \nfrom AWS without needing to store secrets. To do this, we’re going to configure OIDC for identity federation \nbetween GitLab and AWS. We’ll be following the [related GitLab documentation](https://docs.gitlab.com/ee/ci/cloud_services/aws/).\n\n### Add the identity provider\n\nThe first step is going to be adding GitLab as an identity and access management (IAM) OIDC provider in AWS.\n
AWS has instructions located [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html), \nbut I will walk through it step by step.\n\nOpen the IAM console inside of AWS.\n\n![iam search](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/iam_1.png){: .shadow}\n\nOn the left navigation pane, under **Access management** choose **Identity providers** and then choose **Add provider**. \nFor provider type, select **OpenID Connect**.\n\nFor **Provider URL**, enter the address of your GitLab instance, such as `https://gitlab.com` or `https://gitlab.example.com`.\n\nFor **Audience**, enter something that is unique and specific to your application. In my case, I'm going to \nenter `react_s3_gl`. To prevent confused deputy attacks, it's best to make this something that is not easy to guess. Take note of \nthis value; you will use it to set the `ID_TOKEN` in your `.gitlab-ci.yml` file.\n\nAfter entering the **Provider URL**, click **Get thumbprint** to verify the server certificate of your IdP. After this, go \nahead and choose **Add provider** to finish up.\n\n### Create the permissions policy\n\nAfter you create the identity provider, you need to create a permissions policy.\n\nFrom the IAM dashboard, under **Access management** select **Policies** and then **Create policy**. \nSelect the JSON tab and paste the following policy, replacing `jw-gl-react` in the `Resource` values with your bucket name.\n\n```javascript\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\"s3:ListBucket\"],\n      \"Resource\": [\"arn:aws:s3:::jw-gl-react\"]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:PutObject\",\n        \"s3:GetObject\",\n        \"s3:DeleteObject\"\n      ],\n      \"Resource\": [\"arn:aws:s3:::jw-gl-react/*\"]\n    }\n  ]\n}\n```\n\nSelect the **Next: Tags** button, add any tags you want, and then select the **Next: Review** button. \nEnter a name for your policy and finish up by creating the policy. \n\n### Configure the role\n\nNow it’s time to add the role. From the IAM dashboard, under **Access management** select **Roles** \nand then select **Create role**. Select **Web identity**.\n\nIn the **Web identity** section, select the identity provider you created earlier. For the \n**Audience**, select the audience you created earlier. Select the **Next** button to continue.\n\nIf you wanted to limit authorization to a specific group, project, branch, or tag, you could create a **Custom trust policy** \ninstead of a **Web identity**. Since I will be deleting these resources after the tutorial, I'm going to keep it simple. For a \nfull list of supported filtering types, see the [GitLab documentation](https://docs.gitlab.com/ee/ci/cloud_services/index.html#configure-a-conditional-role-with-oidc-claims).\n\n![web identity](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/iam_2.png){: .shadow}\n\nDuring the **Add permissions** step, select the policy you created and select **Next** to continue. Give your role a name and click **Create role**.\n\nOpen the Role you just created. In the summary section, find the Amazon Resource Name (ARN) and save it somewhere secure. You will use this in your pipeline.\n\n![role](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/iam_3.png){: .shadow}
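\n\nFor reference, the trust policy generated for the role should look roughly like the following sketch, where `123456789012` stands in for your AWS account ID and the audience is the value you chose earlier:\n\n```javascript\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Principal\": {\n        \"Federated\": \"arn:aws:iam::123456789012:oidc-provider/gitlab.com\"\n      },\n      \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n      \"Condition\": {\n        \"StringEquals\": {\n          \"gitlab.com:aud\": \"react_s3_gl\"\n        }\n      }\n    }\n  ]\n}\n```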
\n\n## Deploy to your Amazon S3 bucket using a GitLab CI/CD pipeline\n\nInside of your project, create two [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui). The first variable should be named `ROLE_ARN`. For the value, paste the ARN of the \nrole you just created. The second variable should be named `S3_BUCKET`. For the value, paste the name of the S3 bucket you created \nearlier in this post.\n\nI have chosen to mask my variables for an extra layer of security.\n\n### Retrieve your temporary credentials\n\nInside of your `.gitlab-ci.yml` file, paste the following code:\n\n```\n.assume_role: &assume_role\n    - >\n      STS=($(aws sts assume-role-with-web-identity\n      --role-arn ${ROLE_ARN}\n      --role-session-name \"GitLabRunner-${CI_PROJECT_ID}-${CI_PIPELINE_ID}\"\n      --web-identity-token $ID_TOKEN\n      --duration-seconds 3600\n      --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]'\n      --output text))\n    - export AWS_ACCESS_KEY_ID=\"${STS[0]}\"\n    - export AWS_SECRET_ACCESS_KEY=\"${STS[1]}\"\n    - export AWS_SESSION_TOKEN=\"${STS[2]}\"\n```\n\nThis is going to use the AWS Security Token Service to generate temporary (_3,600 seconds_) credentials utilizing the OIDC role you created earlier.\n\n### Create the deploy job\n\nNow, let's add a build and deploy job to build your application and deploy it to your S3 bucket.\n\nFirst, update the stages in your `.gitlab-ci.yml` file to include a `build` and `deploy` stage as shown below:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n```\n\nNext, let's add a job to build your application. Paste the following code in your `.gitlab-ci.yml` file:\n\n```\nbuild artifact:\n  stage: build\n  image: node:latest\n  before_script:\n    - npm install\n  script:\n    - npm run build\n  artifacts:\n    paths:\n      - build/\n    when: always\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n```\n\nThis is going to run `npm run build` if the change occurs on the `main` branch and upload the build directory as an \nartifact to be used during the next step.\n\nNext, let's add a job to actually deploy to your S3 bucket. Paste the following code in your `.gitlab-ci.yml` file:\n\n```\ndeploy s3:\n  stage: deploy\n  image:\n    name: amazon/aws-cli:latest\n    entrypoint: \n      - '/usr/bin/env'\n  id_tokens:\n      ID_TOKEN:\n        aud: react_s3_gl\n  script:\n    - *assume_role\n    - aws s3 sync build/ s3://$S3_BUCKET\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n```\n\nThis uses [YAML anchors](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#yaml-anchors-for-scripts) to run the `assume_role` script, \nand then uses the AWS CLI to upload your build artifact to the bucket you defined as a variable. This job also only runs if the change occurs \non the `main` branch.\n\nMake sure the `aud` value matches the value you entered for your audience when you set up the identity provider. In my case, I entered `react_s3_gl`.
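\n\nIf you want to confirm that the job really assumed the role before syncing, you can optionally extend the `deploy s3` script section with a sanity check (a sketch; `aws sts get-caller-identity` simply prints the identity of the temporary credentials to the job log):\n\n```\n  script:\n    - *assume_role\n    # Optional: confirm which role the job is running as\n    - aws sts get-caller-identity\n    - aws s3 sync build/ s3://$S3_BUCKET\n```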
\n\nYour complete `.gitlab-ci.yml` file should look like this:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\n.assume_role: &assume_role\n    - >\n      STS=($(aws sts assume-role-with-web-identity\n      --role-arn ${ROLE_ARN}\n      --role-session-name \"GitLabRunner-${CI_PROJECT_ID}-${CI_PIPELINE_ID}\"\n      --web-identity-token $ID_TOKEN\n      --duration-seconds 3600\n      --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]'\n      --output text))\n    - export AWS_ACCESS_KEY_ID=\"${STS[0]}\"\n    - export AWS_SECRET_ACCESS_KEY=\"${STS[1]}\"\n    - export AWS_SESSION_TOKEN=\"${STS[2]}\"\n  \nunit test:\n  image: node:latest\n  stage: test\n  before_script:\n    - npm install\n  script:\n    - npm run test:ci\n  coverage: /All files[^|]*\\|[^|]*\\s+([\\d\\.]+)/\n  artifacts:\n    paths:\n      - coverage/\n    when: always\n    reports:\n      junit:\n        - junit.xml\n\nbuild artifact:\n  stage: build\n  image: node:latest\n  before_script:\n    - npm install\n  script:\n    - npm run build\n  artifacts:\n    paths:\n      - build/\n    when: always\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n\n\ndeploy s3:\n  stage: deploy\n  image:\n    name: amazon/aws-cli:latest\n    entrypoint: \n      - '/usr/bin/env'\n  id_tokens:\n      ID_TOKEN:\n        aud: react_s3_gl\n  script:\n    - *assume_role\n    - aws s3 sync build/ s3://$S3_BUCKET\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n```\n\n### Make a change and test your pipeline\n\nTo test your pipeline, inside of `App.js`, change this line `Edit \u003Ccode>src/App.js\u003C/code> and save to reload.` to \n`This was deployed from GitLab!` and commit your changes to the `main` branch. The pipeline should kick off, and when \nit finishes successfully, you should see your updated application at the URL of your static website.\n\n![updated app](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/auto_deploy.png){: .shadow}\n\nYou now have a CI/CD pipeline built in GitLab that receives temporary credentials from AWS using OIDC and \nautomatically deploys to your Amazon S3 bucket.\n
To take it a step further, you can [secure your application](https://docs.gitlab.com/ee/user/application_security/secure_your_application.html) \nwith GitLab's built-in security tools.\n\nAll code for this project can be found [here](https://gitlab.com/guided-explorations/engineering-tutorials/react-s3).\n\nCover image by [Lucas van Oor](https://unsplash.com/@switch_dtp_fotografie?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/bucket?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n## Related posts and documentation\n- [How to automate testing for a React application with GitLab](/blog/how-to-automate-testing-for-a-react-application-with-gitlab/)\n- [How to deploy AWS with GitLab](/blog/deploy-aws/)\n- [Deploy to AWS from GitLab CI/CD](https://docs.gitlab.com/ee/ci/cloud_deployment/)\n- [Configure OpenID Connect in AWS to retrieve temporary credentials](https://docs.gitlab.com/ee/ci/cloud_services/aws/)\n- [Secure GitLab CI/CD workflows using OIDC JWT on a DevSecOps platform](https://about.gitlab.com/blog/oidc/)\n",[894,110],{"slug":1941,"featured":6,"template":678},"how-to-deploy-react-to-amazon-s3","content:en-us:blog:how-to-deploy-react-to-amazon-s3.yml","How To Deploy React To Amazon S3","en-us/blog/how-to-deploy-react-to-amazon-s3.yml","en-us/blog/how-to-deploy-react-to-amazon-s3",{"_path":1947,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1948,"content":1954,"config":1959,"_id":1961,"_type":16,"title":1962,"_source":17,"_file":1963,"_stem":1964,"_extension":20},"/en-us/blog/code-counting-in-gitlab",{"title":1949,"description":1950,"ogTitle":1949,"ogDescription":1950,"noIndex":6,"ogImage":1951,"ogUrl":1952,"ogSiteName":692,"ogType":693,"canonicalUrls":1952,"schema":1953},"Lightning fast code counting for better code management intelligence","Knowledge of your code composition can come through simple counting of lines of code per language.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682614/Blog/Hero%20Images/noaa-PkHsrwNOfBE-unsplash.jpg","https://about.gitlab.com/blog/code-counting-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Lightning fast code counting for better code management intelligence\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2023-02-15\",\n      }",{"title":1949,"description":1950,"authors":1955,"heroImage":1951,"date":1956,"body":1957,"category":14,"tags":1958},[1701],"2023-02-15","\n\nOne of the earliest forms of intelligence was to simply answer the question “How many?”. Counting is one of the first things that we learn as a child. As we grow older, we come to see this deceptively simple concept as somewhat childish. Yet, upon the concept of counting, the entire discipline of statistics is founded. In turn, every discipline that benefits from statistics owes a debt of gratitude to the very humble concept of counting. \n\nMany of the massive data lakes we keep are essentially vast amounts of counting. Using artificial intelligence to analyze this data, we frequently find insights we were not expecting. So it would seem that counting is somewhat of a fractal concept – it’s deceptively simple, but, when compounded, generates delightful things.\n\nSo if we have a thing we are trying to be more intelligent about, our first endeavor might be to count it. 
Let’s see how to apply that to our code stored in GitLab.\n\n### Why developers count code\n\nThe following list is from real-world scenarios. Many of them are also asserted in Ben Boyter’s blog post [Why count lines of code?](https://boyter.org/posts/why-count-lines-of-code/). Their enumeration here is not an endorsement of the validity or accuracy of code counting for the claimed benefit, and the fundamental assumptions of such models are not stated. Because code counting is essentially a form of modeling, it is also subject to George Box’s axiom: “All models are wrong, but some are useful.”\n\n- Showing the languages in a repository using an absolute metric like source lines of code helps to quickly assess if one can contribute to the project, given their own talents. \n- Cost assessment for anything which charges by “lines of code” (some code scanning and development tools may charge this way).\n- Although [research](https://gitlab.com/gitlab-org/gitlab/-/issues/371038) shows that lines of code are not a good metric for measuring contribution, some developers have gotten used to seeing lines of code per contributor. \n- Code base shrinkage as a measure of good architecture (simplification).\n- Anything where the complexity of code affects project agility and costs. For instance, assessing and reporting status on migrating a code base to a new language. \n- Staffing a development team – understanding what language competencies are needed across the team and in what relative proportion to each other, or understanding the same for the entire organization’s codebase.\n- IT tooling decisions to support the needs of an organization given the most used coding languages across all repositories in the org.\n- Assessment of tech debt.\n\nWhile it is easy to create bad models with any of the above counts, the focus of this post is to get some good counts from which you can carefully build a model.\n\n### Toolsmithing GitLab CI: A working example as a shared CI library\n\nThe easiest way to differentiate a “toolized” solution from a “templated” one is that you can simply and easily reuse toolized code exactly as-is, without needing to change it. Many formal coding languages have the concept of shared libraries or dependencies that are essentially toolized. A templated solution consists of a starting point that you customize, after which you have to manage the code yourself. Templates can function as scaffolding for an entire project, or as snippets of code that perform a specific function. The fundamental difference is that when you use a template, you end up owning and managing the resultant code going forward.\n\nIn [GitLab CI](https://docs.gitlab.com/ee/ci/introduction/index.html), we can create our own tooling or dependencies with a few tricks stolen from [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/). These tricks are:\n\n1. Use includes to reference the shared managed library code (this creates a “dependency” on code that is being managed outside of your own).\n2. The includable code must be written like a function, where all needed inputs are either passed in or can be collected from the environment. No hard coding is allowed, because that would mean you’ve created a template, which can’t be depended upon directly.\n3. Use GitLab CI ‘functions’. I am coining this term to indicate that in GitLab you can precede a job name with a “.” (dot) and it will not be executed when it is read.\n
Then you can create a new job using all the code in the “dot named” job and add variables by using the `extends:` keyword. By using dot named jobs in your includable code, the developer consuming the “managed shared CI dependency” can decide when, where, and how to call the toolized code. A short sketch of this consumption pattern appears below, after the code section.\n\n### The result: A code-counting GitLab CI extension\n\nHere are some of the final design attributes of this code counting solution:\n\n- Is extremely fast for the given task.\n- Leverages the Git clone optimization lessons contained in this article: [How much code do I have? A DevSecOps story](https://acloudguru.com/blog/engineering/how-much-code-do-i-have-a-devsecops-story).\n- Uses the [lightning fast, open source code counting tool SCC](https://github.com/boyter/scc) by Ben Boyter.\n- Is implemented as a reusable GitLab CI shared library extension.\n- Allows configuration of the file extensions that should not be checked out because they do not include source code to be counted.\n- Leverages the GitLab Run Pipeline forms capability.\n- Can enumerate and count an entire group hierarchy in GitLab, or be given a stipulated list.\n- Uses the runner token to access and read repositories by default, but can be given a specific token.\n- Uploads HTML and text artifacts that contain the code counting report.\n- Purposely emits the code counting results into CI logs for easy reference.\n\n### The output\n\nResults are shown below in the CI log, but they are also captured as an HTML artifact.\n\nThe clone time is also in the log for each project so you can verify that the cloning optimizations are making a substantial difference.\n\nThese particular results are counting all the code in [https://gitlab.com/guided-explorations](https://gitlab.com/guided-explorations).\n\n![codecountingcilog](https://about.gitlab.com/images/blogimages/code-counting-in-gitlab/codecountingcilog.png)\n\n### The code\n\nThe code is available in this project: [https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc](https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc). You can view the scanning results in the job logs of past runs here: [https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc/-/pipelines](https://gitlab.com/guided-explorations/code-metrics/ci-cd-extension-scc/-/pipelines).\n\nRather than fail the entire job when a project fails to clone, the job simply logs the error from the attempted clone. This allows review of valid use cases for not being able to clone and obtains as complete a picture as possible. The cloning error log is uploaded as a job artifact and emitted to the log.
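\n\nTo make the consumption pattern concrete, here is a minimal sketch of how a project could depend on a shared CI extension like this one - the file name, job names, and variables below are hypothetical, so check the project's README for the real ones:\n\n```yaml\ninclude:\n  # Depend on the shared CI library project (hypothetical file name)\n  - project: 'guided-explorations/code-metrics/ci-cd-extension-scc'\n    file: '/code-counting.gitlab-ci.yml'\n\ncount-code:\n  # The library's dot job is skipped until a real job extends it\n  extends: .count-lines-of-code\n  variables:\n    # Hypothetical parameter: the group hierarchy to enumerate and count\n    TOP_LEVEL_GROUP: 'my-group'\n```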
\n\n### Innovation: MR complexity metrics extension\n\nDuring a customer engagement, I was asked whether there was a way to assess how much change a merge request contained and mark it accordingly. An operations team was missing its deployment SLAs because the amount of change - and, therefore, the risk and review effort - could be highly variable. However, since there was no way to estimate this without putting human eyes on each MR, MRs with a high degree of change would overrun their SLA when they couldn’t be pre-triaged.\n\nI wondered if I could use the previously built code counting solution to count diffs, estimate how much change had occurred in the commits of an MR branch, and then apply labels to MRs indicating their degree of change - a rough proxy for how much review time might be required.\n\nIt turned out to be feasible, and you can review the [Shared Library in Git Diff Revision Activity Metrics CI EXTENSION](https://gitlab.com/guided-explorations/code-metrics/git-diff-revision-activity-metrics) and see the results in the MRs list of this working example project that uses that code: [MR list for Diff Revision Activity Analytics DEMO](https://gitlab.com/guided-explorations/code-metrics/diff-revision-activity-analytics-demo/-/merge_requests).\n\n### The value of remote work water cooler conversations\n\nI should explain why this blog was written now, when this solution has been around for quite a while. You often hear that working remotely does not allow for water cooler conversations - which, the story goes, are where real innovation happens.\n\nWithin GitLab’s [Remote First culture](/company/culture/all-remote/guide/) it is expected that anyone in the company can schedule a “coffee chat” with anyone else. The cultural expectation is that this is normal and that, unless you are getting an overwhelming number of these requests, when asked, you will find time to socially connect.\n\nI received a coffee chat request from [Torsten Linz](https://gitlab.com/tlinz), the Senior Product Manager for the Source Code Management group, to chat about my comments on an issue about code counting he had become aware of, and the working example I had linked there. He also wanted to see if I could help get a copy of it working in his GitLab group.\n\nDuring that collaborative time, I discovered that my example was no longer working: SCC had undergone some major code changes, and the example presumed that the counting job did not need to authenticate to prove it should have access to the projects in the enumerated GitLab group. While we were collaborating, we fixed these problems and improved the solution to use the SCC binary rather than depend on a working Golang runtime. After our collaborative session, as I tweaked some more, I documented the parameters in README.md and debugged the ability to run it either with a group enumeration or a provided list of specific Git repos.\n\nSo I owe big thanks to Torsten and to GitLab’s cultural support for remote-first water cooler conversations for improving this working example to the point that it is worth sharing with a broader audience.\n
If you’d like to know more, check out the GitLab handbook page: [Informal communication in an all-remote environment](/company/culture/all-remote/informal-communication/).\n\n_Cover image by [NOAA](https://unsplash.com/@noaa?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/lightning-fast?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[894,750,832],{"slug":1960,"featured":6,"template":678},"code-counting-in-gitlab","content:en-us:blog:code-counting-in-gitlab.yml","Code Counting In Gitlab","en-us/blog/code-counting-in-gitlab.yml","en-us/blog/code-counting-in-gitlab",{"_path":1966,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1967,"content":1973,"config":1980,"_id":1982,"_type":16,"title":1983,"_source":17,"_file":1984,"_stem":1985,"_extension":20},"/en-us/blog/understanding-and-improving-total-blocking-time",{"title":1968,"description":1969,"ogTitle":1968,"ogDescription":1969,"noIndex":6,"ogImage":1970,"ogUrl":1971,"ogSiteName":692,"ogType":693,"canonicalUrls":1971,"schema":1972},"Total Blocking Time - The metric to know for faster website performance","Learn how to identify and fix some root causes for high Total Blocking Time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682637/Blog/Hero%20Images/tbt_cover_image.jpg","https://about.gitlab.com/blog/understanding-and-improving-total-blocking-time","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Total Blocking Time - The metric to know for faster website performance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacques Erasmus\"}],\n        \"datePublished\": \"2023-02-14\",\n      }",{"title":1968,"description":1969,"authors":1974,"heroImage":1970,"date":1976,"body":1977,"category":14,"tags":1978},[1975],"Jacques Erasmus","2023-02-14","\n\nOur world overwhelms us with information that is more accessible than ever. The increasing rates of content production and consumption are gifts that keep on giving. We can't seem to keep up with the information thrown at us. We're limited by our cognitive limitations and time constraints, and a [recent study](https://www.nature.com/articles/s41467-019-09311-w) concluded the result is a shortening of attention spans. Websites are no exception.\n\nUsers who interact with your website want feedback, and want it fast. Preferably immediately! Website performance has become an important factor in keeping users engaged. But how do you measure how unresponsive a page is before it becomes fully interactive?\n\nMany [performance metrics](https://web.dev/vitals/) exist, but this blog post focuses on Total Blocking Time (TBT).\n\n## What is Total Blocking Time?\n\nTBT measures the total amount of time tasks were blocking your browser's main thread. This metric represents the total amount of time that a user could not interact with your website. It's measured between [First Contentful Paint (FCP)](https://web.dev/fcp/) and [Time to Interactive (TTI)](https://web.dev/tti/), and represents the combined blocking time for all long tasks.\n\n## What is a long task?\n\nA long task is a process that runs on the main thread for longer than 50 milliseconds (ms). After a task starts, a browser can't interrupt it, and a single long-running task can block the main thread. 
The result: a website that is unresponsive to user input until the task completes.\n\nAfter the first 50 ms, all time spent on a task is counted as _blocking time_. This diagram shows five tasks, two of which block the main thread for a combined 140 ms:\n\n![A diagram containing five tasks, two of which are blocking the main thread. The TBT for these tasks adds up to 140 ms.](https://about.gitlab.com/images/blogimages/tbt/tasks_diagram.png)\n\n## How can we measure TBT?\n\nMany tools measure TBT, but here we’ll use [Chrome DevTools](https://developer.chrome.com/docs/devtools/evaluate-performance/) to analyze runtime performance.\n\nAs an example: We recently improved performance on GitLab's [**View Source** page](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitlab-ci.yml). This screenshot, taken before the performance improvement, shows eight long-running tasks with a combined TBT of **2388.16 ms**. That's more than **two seconds**:\n\n![A screenshot indicating that there are eight long-running tasks. The TBT of these tasks adds up to 2388.16 ms.](https://about.gitlab.com/images/blogimages/tbt/summary_before.png)\n\n## How can we improve TBT?\n\nAs you might have guessed by now, reducing the time needed to complete long-running tasks reduces TBT.\n\nBy selecting one of the tasks from the previous screenshot, we can get a breakdown of how the browser executed it. This **Bottom-Up** view shows that much time is spent on rendering content in the Document Object Model (DOM):\n\n![A screenshot of the Bottom-Up view of one of the tasks from the previous screenshot. It indicates that most of the time is being spent on rendering content in the DOM.](https://about.gitlab.com/images/blogimages/tbt/task_7_before.png)\n\nThis page has a lot of content that is below the fold – not immediately visible. The browser is spending a lot of resources upfront to render content that is not even visible to the user yet!\n\nSo what can we do? Some ideas:\n\n- **Change the UX.**\n  - Add a Show More button, paging, or virtual scrolling for long lists.\n- **Lazy-load images.**\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/65745))\n    - Lazy-loading images reduces page weight, allowing the browser to spend resources on more important tasks.\n- **Lazy-load long lists.**\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/71633))\n    - Similar to lazy-loading images, this approach allows the browser to spend resources on more important tasks.\n- **Reduce excessive HTML.**\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/65835))\n    - For example, when loading large pages, consider removing unnecessary content. Or, consider rendering some content (like icons) with CSS instead.\n- **Defer rendering when possible.**\n    - The [`content-visibility: auto;`](https://developer.mozilla.org/en-US/docs/Web/CSS/content-visibility) CSS property ensures the rendering of off-screen (and thus not yet relevant) elements is skipped without affecting the page layout. ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67050))\n    - The [Intersection Observer API](https://developer.mozilla.org/en-US/docs/Web/API/Intersection_Observer_API) allows you to observe when elements intersect with the viewport. This information can be used to show or hide certain elements. ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/71633))\n    - The global [`requestIdleCallback` method](https://developer.mozilla.org/en-US/docs/Web/API/Window/requestIdleCallback?qs=requestIdleCallback) can be used to render content after the browser goes into an idle state (see the sketch after this list).\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942/diffs#7eed73783787184e5b1c029b9668e48638f3a6e8_64_78))
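\n\nAs a minimal sketch of the idle-rendering idea (the `renderBelowTheFold` function and `#below-the-fold` container are hypothetical):\n\n```javascript\n// Defer non-critical rendering until the browser is idle,\n// with a timeout fallback for browsers without requestIdleCallback\nconst scheduleIdle = 'requestIdleCallback' in window\n  ? (cb) => window.requestIdleCallback(cb)\n  : (cb) => setTimeout(cb, 200);\n\nscheduleIdle(() => {\n  // Hypothetical: render content that sits below the fold\n  renderBelowTheFold(document.querySelector('#below-the-fold'));\n});\n```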
\n    - The global [`requestIdleCallback` method](https://developer.mozilla.org/en-US/docs/Web/API/Window/requestIdleCallback?qs=requestIdleCallback) can be used to render content after the browser goes into an idle state.\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942/diffs#7eed73783787184e5b1c029b9668e48638f3a6e8_64_78))\n\nFrameworks such as VueJS and React are already heavily optimized. However, be mindful of how you use these frameworks to avoid expensive tasks.\n\n### Change VueJS usage to improve TBT\n\nThis screenshot shows the **Bottom-Up** view of a task. Much of the task time is spent on activities from third-party code in the VueJS framework:\n\n![A screenshot of the Bottom-Up view of one of the tasks. It indicates that a lot of the time is being spent on activities in the third-party VueJS framework.](https://about.gitlab.com/images/blogimages/tbt/task_6_before.png)\n\nWhat improvements can we make?\n\n- **Use [Server-side rendering (SSR)](https://gitlab.com/gitlab-org/gitlab/-/issues/215365) or [streaming](https://gitlab.com/gitlab-org/frontend/rfcs/-/issues/101)** for pages that are sensitive to page load performance.\n- **If you don't _need_ Vue, don't use it.**\n  Component instances are a lot more expensive than plain DOM nodes. Try to avoid unnecessary component abstractions.\n- **Optimize component [props](https://vuejs.org/guide/components/props.html).**\n  Child components in Vue update when at least one of their received props is updated. Analyze the data that you pass to components. You may find that you can avoid unnecessary updates by making changes to your props strategy.\n- **Use [v-memo](https://vuejs.org/api/built-in-directives.html#v-memo) to skip updates.**\n    - In Vue versions 3.2 and later, `v-memo` enables you to cache parts of your template. The cached template updates and re-renders only if one of its provided dependencies changes.\n- **Use [v-once](https://vuejs.org/api/built-in-directives.html#v-once) for data** that does not need to be reactive after the initial load.\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942))\n    - `v-once` ensures the element and component are only rendered once. Any future updates will be skipped.\n- **Reduce expensive tasks in your Vue components.**\n  Even a small script may take a long time to finish if it’s not well optimized. Some suggestions:\n    - By using [`requestIdleCallback`](https://developer.mozilla.org/en-US/docs/Web/API/Window/requestIdleCallback?qs=requestIdleCallback) you can defer the execution of non-critical tasks. ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942/diffs#7eed73783787184e5b1c029b9668e48638f3a6e8_64_78))
\n    - By executing expensive scripts in [WebWorkers](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers) you can unblock the main thread.\n\n### Results and methods\n\nBy using three of the methods suggested above, we reduced TBT from about **3 seconds** to approximately **500 ms**:\n\n![A chart indicating a drop in TBT from ~3 seconds to ~500 milliseconds.](https://about.gitlab.com/images/blogimages/tbt/chart_after.png)\n\nWhat did we do?\n\n- Deferred rendering by using the [`content-visibility: auto;`](https://developer.mozilla.org/en-US/docs/Web/CSS/content-visibility) CSS property.\n- Deferred rendering by using the [Intersection Observer API](https://developer.mozilla.org/en-US/docs/Web/API/Intersection_Observer_API).\n- Used [v-once](https://vuejs.org/api/built-in-directives.html#v-once) for content that didn't need to be reactive after rendering.\n\nRemember, the size of the decrease depends on how optimized your app is to begin with.\n\nThere is a lot more we can do to improve TBT. While the specific approach depends on the app you're optimizing, the general methods discussed here are very effective at finding improvement opportunities in any app. Like most things in life, a series of small changes often yields the biggest impact. So let's [iterate](/blog/dont-confuse-these-twelve-shortcuts-with-iteration/) together, and adapt to this ever-changing world.\n\n> “Adaptability is the simple secret of survival.” – Jessica Hagedorn\n\n_Cover image by [Growtika](https://unsplash.com/@growtika?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/Iqi0Rm6gBkQ?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[1979,704,726],"frontend",{"slug":1981,"featured":6,"template":678},"understanding-and-improving-total-blocking-time","content:en-us:blog:understanding-and-improving-total-blocking-time.yml","Understanding And Improving Total Blocking Time","en-us/blog/understanding-and-improving-total-blocking-time.yml","en-us/blog/understanding-and-improving-total-blocking-time",{"_path":1987,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1988,"content":1993,"config":1998,"_id":2000,"_type":16,"title":2001,"_source":17,"_file":2002,"_stem":2003,"_extension":20},"/en-us/blog/gitlab-importers",{"title":1989,"description":1990,"ogTitle":1989,"ogDescription":1990,"noIndex":6,"ogImage":1776,"ogUrl":1991,"ogSiteName":692,"ogType":693,"canonicalUrls":1991,"schema":1992},"How to migrate data to GitLab using main importers","Learn about the capabilities of main importers, which are used to import data from external tools and from other GitLab instances.","https://about.gitlab.com/blog/gitlab-importers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate data to GitLab using main importers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-02-13\"\n      }",{"title":1989,"description":1990,"authors":1994,"heroImage":1776,"date":1995,"body":1996,"category":14,"tags":1997},[1019],"2023-02-13","\n\nA typical organization looking to adopt GitLab already uses many other tools. Artifacts such as code, build pipelines, issues, and epics will already exist and be changed daily.
A seamless transition of work in progress is, therefore, critically important when importing data. GitLab importers aim to make this process easy and reliable, ensuring data is imported quickly and with maximum care.\n\nAt GitLab, a dedicated development team, named group:import, creates a seamless experience when importing data into GitLab or from one GitLab instance to another. This team continuously develops and improves the importing experience and keeps our importers up to date with new features and capabilities.\n\n## Migrate groups by direct transfer\n\nUsing group migration, you can import groups from one GitLab instance to another instance. The most common use case is to import groups from self-managed GitLab instances to GitLab.com (GitLab SaaS). With the group migration, you can migrate many groups in a single click.\n\n### Which items are imported?\n\nThe group migration imports the entire group structure, including all the subgroups and projects in them. Currently, to import projects as part of the group migration on self-managed GitLab, the administrator needs to enable the feature flag named `bulk_import_projects`. On GitLab.com, our SaaS offering, migration of both groups and projects is available. More information can be found in our [documentation](https://docs.gitlab.com/ee/user/group/import/#migrate-groups-by-direct-transfer-recommended).\n\nThe team continuously adds objects to the migration, but not all group items are imported. The docs cover the [items that are imported](https://docs.gitlab.com/ee/user/group/import/#migrated-group-items).\n\n### How can groups be imported?\n\nIt is very simple to import groups between two instances. Here are the steps:\n\n- Create a new group or subgroup in the designated instance\n- Select \"Import group\"\n- Connect to the remote instance with your [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)\n- Select the source groups you want to import\n- Click \"Import xyz groups\"\n\n![bulk_imports_v14_1](https://about.gitlab.com/images/blogimages/2022-11-15-gitlab-importers/bulk_imports_v14_1.png)\n\n## File-based import/export (the previously used method)\n\nGroup migration is the preferred method to migrate content from one GitLab instance to another, as it automates the process and lets you import many groups in a single click. However, for some use cases, such as air-gapped networks where there is no network connection between the two instances, or environments with limited connectivity, group migration won't help because it requires a connection between the two instances. File-based import/export for [groups](https://docs.gitlab.com/ee/user/group/settings/import_export.html) and [projects](https://docs.gitlab.com/ee/user/project/settings/import_export.html) can be used when there is no connectivity between the instances.\n\nFile-based import/export is a manual process and requires a few steps to migrate each group or project. It is available from the UI and in the API. The team plans to put it behind a disabled-by-default feature flag soon to encourage users to use group migration. However, you will be able to enable the feature flag in your instance if your use case requires the file-based import/export. More info can be found in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/363406).
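\n\nDirect transfer can also be driven through the REST API, which helps when migrating many groups. As an illustration, here is a minimal, hypothetical Python sketch that starts a direct-transfer migration through the `/bulk_imports` endpoint – the instance URLs, group paths, and token variables are placeholders, and the exact attribute reference lives in the GitLab API documentation:\n\n```python\nimport os\n\nimport requests\n\n# Placeholders: personal access tokens for the destination and source instances\ndestination_token = os.environ['GL_DESTINATION_TOKEN']\nsource_token = os.environ['GL_SOURCE_TOKEN']\n\n# Start a direct transfer migration on the destination instance\nresponse = requests.post(\n    'https://gitlab.com/api/v4/bulk_imports',\n    headers={'PRIVATE-TOKEN': destination_token},\n    json={\n        # Source instance to import from\n        'configuration': {\n            'url': 'https://gitlab.source.example.com',\n            'access_token': source_token,\n        },\n        # Groups to migrate, and where to put them\n        'entities': [\n            {\n                'source_type': 'group_entity',\n                'source_full_path': 'source-group',\n                'destination_slug': 'imported-group',\n                'destination_namespace': 'target-namespace',\n            }\n        ],\n    },\n    timeout=30,\n)\nresponse.raise_for_status()\n\n# The response contains the migration id and status for polling\nprint(response.json())\n```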
\n\n## Import projects from external tools\n\nGitLab has built-in support for importing projects from [a variety of tools](https://docs.gitlab.com/ee/user/project/import/).\n\nThe GitHub importer is the most common importer and, therefore, the team invests a lot of effort in adding more migrated components. GitLab and GitHub have a different structure and architecture, so it is sometimes tricky to import objects from GitHub when the migrated components are implemented differently in GitLab, and the team needs to find creative ways to map some of the features or configurations. This is an example [epic](https://gitlab.com/groups/gitlab-org/-/epics/8585) with a proposal to map rules for protected branches when migrating GitHub protected rules.\n\n### What can be imported from GitHub to GitLab?\n\n- Repository description\n- Git repository data\n- Branch protection rules\n- Issues\n- Pull requests\n- Wiki pages\n- Milestones\n- Labels\n- Pull request review comments\n- Regular issue and pull request comments\n- Attachments for\n    - Release notes\n    - Comments and notes\n    - Issue description\n    - Merge Request description\n- Git Large File Storage (LFS) objects\n- Pull request reviews\n- Pull request “merged by” information\n- Pull request comment replies in discussions\n- Diff note suggestions\n- Release note descriptions\n\nHere is a [full list of imported data](https://docs.gitlab.com/ee/user/project/import/github.html#imported-data).\n\nRead what's next in our [GitHub Epic](https://gitlab.com/groups/gitlab-org/-/epics/2984).\n\n### Repository by URL\n\nAn alternative way to import external projects is the Repository by URL option. You can import any Git repository through HTTP from the *Import Project* page, by choosing \"Repository by URL\".\n\nTo learn more about the Importer direction, roadmap, etc., refer to [Category Direction - Importers](/direction/manage/import_and_integrate/importers/).\n\n_Cover image by [Conny Schneider](https://unsplash.com/@choys_?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/data-migration?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[726,894,704],{"slug":1999,"featured":6,"template":678},"gitlab-importers","content:en-us:blog:gitlab-importers.yml","Gitlab Importers","en-us/blog/gitlab-importers.yml","en-us/blog/gitlab-importers",{"_path":2005,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2006,"content":2012,"config":2018,"_id":2020,"_type":16,"title":2021,"_source":17,"_file":2022,"_stem":2023,"_extension":20},"/en-us/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab",{"title":2007,"description":2008,"ogTitle":2007,"ogDescription":2008,"noIndex":6,"ogImage":2009,"ogUrl":2010,"ogSiteName":692,"ogType":693,"canonicalUrls":2010,"schema":2011},"GitOps with GitLab: What you need to know about the Flux CD integration","Inside the decision to integrate Flux CD with the GitLab agent for Kubernetes and what it means to you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678356/Blog/Hero%20Images/balance-speed-security-devops.jpg","https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: What you need to know about the Flux CD integration\",\n        \"author\":
[{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2023-02-08\",\n      }",{"title":2007,"description":2008,"authors":2013,"heroImage":2009,"date":2015,"body":2016,"category":14,"tags":2017},[2014],"Viktor Nagy","2023-02-08","\n\nIn January, [we decided to integrate Flux CD with the GitLab agent for Kubernetes](https://gitlab.com/gitlab-org/gitlab/-/issues/357947). [Flux CD](https://fluxcd.io/) is a mature GitOps solution and one of the market leaders in the area. We have since decided to make Flux CD our recommended approach to do GitOps with GitLab – previously, the agent for Kubernetes alone was the recommended approach. Let's discuss what this change means for current users and what our plans are for the integration.\n\nFirst of all, let's remove the most worrying thought from the agenda: We are not deprecating any agent for Kubernetes functionality at this point. The GitOps offering remains fully supported and transitions to maintenance mode. We plan to deprecate it with at least one year of removal time once we consider the Flux integration solid. As a result, the removal is unlikely before the GitLab 17.0 release, which is expected in 2024. We are looking into providing tooling to facilitate (or automate) the migration once the time comes. If you use the agent for Kubernetes for GitOps, you don't have to do anything at this time.\n\nThis change does not affect the agent's other non-GitOps functionality either. The [CI/CD pipeline integration](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) and [operational container scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) remain intact, and we will continue investing in them.\n\n## What to expect from this change\n\nFrom now on, instead of building our solution for GitOps, we will focus on supporting Flux and improving its user experience when it is used together with GitLab. Flux CD will become the recommended tool to do GitOps with GitLab. Initially, we will provide documentation on the Flux setup we recommend for our users while we focus on building out various integrations.\n\nIn terms of the integrations, we are looking at providing a UI built into GitLab. You might also be able to use the UI with other tools, including the CI pipeline integration of the agent, but it will work best with deployments managed by Flux. Besides the UI integration, we want to streamline Flux's access management. Flux accesses GitLab through the regular GitLab front door. As a result, it needs to authenticate with a token, requests might be rate-limited, and, in general, it does not seem to be the most efficient way to do its job. We plan to simplify this for our users to avoid the necessity of managing dozens of deploy keys and to decrease the load on GitLab at the same time.\n\n## Why Flux?\n\nWhy did we choose Flux CD instead of something else? We evaluated several options. There are other open-source GitOps tools. The biggest contender was [ArgoCD](https://argoproj.github.io/cd), another mature Cloud Native Computing Foundation project in the GitOps space. ArgoCD is a full-featured product for GitOps, while Flux is a GitOps toolkit. While we like and value ArgoCD a lot, we think it does not lend itself to integration with GitLab.\n\nAs we are already in the process of building out UI integrations with the cluster, we know how the GitLab UI will be able to reach the Kubernetes API. 
Flux relies on the standard Kubernetes API 100%, so we can easily integrate it into our UI access approach. Relying only on the Kubernetes API is a significant benefit over ArgoCD, which provides a custom API.\n\nBesides going with another tool, we evaluated the work needed to build a competitive, in-house solution. We found in-house development to be the strongest contender to Flux CD, and while it was very compelling, we decided to go with the integration instead. We believe this should give our customers more value faster than a custom solution. Moreover, it should enable existing Flux users to benefit from our integrations with minor modifications in their usage patterns as we roll out the integrations.\n\n## What comes next?\n\nFirst, we want to [document our recommendations for using Flux CD with GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/389382). At the same time, we will change our GitOps documentation to recommend Flux instead of the legacy GitOps solution. We consider these the most important steps to minimize uncertainty and set you up for a successful start.\n\nTogether with the above, the team is working hard on shipping the first version of an [integrated Kubernetes UI](https://gitlab.com/gitlab-org/gitlab/-/issues/375449). We are starting with an environment overview and will build out an [entire Kubernetes dashboard](https://gitlab.com/groups/gitlab-org/-/epics/2493) as part of GitLab. The cluster UI integration will enable GitLab users to learn more about their cluster state without leaving the GitLab UI and should allow a nearly real-time view of GitOps deployments using Flux CD.\n\nWe have clear ideas on how to do what I described above. We are still researching and learning about many other topics, including [how to best simplify Flux's access to GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/389393). If you have experience using Flux with GitLab and have any feedback, recommendations, or requests on what the integration should support, we would like to hear from you. Please reach out to me using [my GitLab profile](https://gitlab.com/nagyv-gitlab).\n\n## The Flux community\n\nBefore I close this article, I would like to say hi and thank you to the Flux community. We have already been invited to the Flux development meeting, and the core team was very welcoming. As we have always actively contributed to the core tools supporting our GitOps offering – first [`gitops-engine`](https://github.com/argoproj/gitops-engine/), later [`cli-utils`](https://github.com/kubernetes-sigs/cli-utils/) – we look forward to contributing to Flux CD.\n\nWe are looking forward to working more closely with you. Thank you for building this great tool and community!\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay.
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n\nRead more:\n\n- More about the [Flux CD integration decision](https://gitlab.com/gitlab-org/gitlab/-/issues/357947)\n- Docs for the [agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html)\n- Issue on [our current focus](https://gitlab.com/gitlab-org/gitlab/-/issues/389382)\n- Preparation issues: [Flux to GitLab access management](https://gitlab.com/gitlab-org/gitlab/-/issues/389393) and [Visualizing Kubernetes resources within the Environments page](https://gitlab.com/gitlab-org/gitlab/-/issues/375449)\n\n",[535,1002,894,676],{"slug":2019,"featured":6,"template":678},"why-did-we-choose-to-integrate-fluxcd-with-gitlab","content:en-us:blog:why-did-we-choose-to-integrate-fluxcd-with-gitlab.yml","Why Did We Choose To Integrate Fluxcd With Gitlab","en-us/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab.yml","en-us/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab",{"_path":2025,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2026,"content":2031,"config":2036,"_id":2038,"_type":16,"title":2039,"_source":17,"_file":2040,"_stem":2041,"_extension":20},"/en-us/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation",{"ogTitle":2027,"schema":2028,"ogImage":1498,"ogDescription":2029,"ogSiteName":692,"noIndex":6,"ogType":693,"ogUrl":2030,"title":2027,"canonicalUrls":2030,"description":2029},"Efficient DevSecOps workflows: Hands-on python-gitlab API automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Efficient DevSecOps workflows: Hands-on python-gitlab API automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-02-01\"\n      }","The python-gitlab library is a useful abstraction layer for the GitLab API. Dive into hands-on examples and best practices in this tutorial.","https://about.gitlab.com/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation",{"title":2027,"description":2029,"authors":2032,"heroImage":1498,"date":2033,"body":2034,"category":14,"tags":2035},[1504],"2023-02-01","A friend once said in a conference presentation, “Manual work is a bug.” When there are repetitive tasks in workflows, I tend to [come back to this quote](https://twitter.com/dnsmichi/status/1574087419237916672) and try to automate as much as possible – for example, by querying a REST API to take an inventory of settings, or by calling API actions to create new comments in GitLab issues/merge requests. The interaction with the GitLab REST API can be done in different ways: using HTTP requests with curl (or [hurl](/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd/)) on the command line, or writing a script in a programming language. The latter can quickly turn into reinventing the wheel, with raw HTTP request code and hand-rolled JSON response parsing.\n\nThanks to the wider GitLab community, many different languages are supported by API abstraction libraries. They provide support for all API attributes, add helper functions to get/create/delete objects, and generally aim to help developers focus.
The [python-gitlab library](https://python-gitlab.readthedocs.io/en/stable/) is a feature-rich and easy-to-use library written in Python.\n\nIn this blog post, you will learn the basic usage of the library – working with API objects, attributes, pagination, and result sets – and then dive into more concrete use cases: collecting data, printing summaries, and writing data to the API to create comments and commits. There is a whole lot more to learn, with many of the use cases inspired by wider community questions on the forum, Hacker News, issues, etc.\n\nThis blog post is a long read, so feel free to stick with the beginner's tutorial or skip to the advanced [DevSecOps](https://about.gitlab.com/topics/devsecops/) use cases, development tips and code optimizations by navigating the table of contents:\n\n- [Getting started](#getting-started)\n- [Configuration](#configuration)\n- [Managing objects: The GitLab Object](#managing-objects-the-gitlab-object)\n    - [Objects managers and loading](#objects-managers-and-loading)\n    - [Pagination of results](#pagination-of-results)\n    - [Working with object relationships](#working-with-object-relationships)\n    - [Working with different object collection scopes](#working-with-different-object-collection-scopes)\n- [DevSecOps use cases for API read actions](#devsecops-use-cases-for-api-read-actions)\n    - [List branches by merged state](#list-branches-by-merged-state)\n    - [Print project settings for review: MR approval rules](#print-project-settings-for-review-mr-approval-rules)\n    - [Inventory: Get all CI/CD variables that are protected or masked](#inventory-get-all-cicd-variables-that-are-protected-or-masked)\n    - [Download a file from the repository](#download-a-file-from-the-repository)\n    - [Migration help: List all certificate-based Kubernetes clusters](#migration-help-list-all-certificate-based-kubernetes-clusters)\n    - [Team efficiency: Check if existing merge requests need to be rebased after merging a huge refactoring MR](#team-efficiency-check-if-existing-merge-requests-need-to-be-rebased-after-merging-a-huge-refactoring-mr)\n- [DevSecOps use cases for API write actions](#devsecops-use-cases-for-api-write-actions)\n    - [Move epics between groups](#move-epics-between-groups)\n    - [Compliance: Ensure that project settings are not overridden](#compliance-ensure-that-project-settings-are-not-overridden)\n    - [Taking notes, generate due date overview](#taking-notes-generate-due-date-overview)\n    - [Create issue index in a Markdown file, grouped by labels](#create-issue-index-in-a-markdown-file-grouped-by-labels)\n- [Advanced DevSecOps workflows](#advanced-devsecops-workflows)\n    - [Container images to run API scripts](#container-images-to-run-api-scripts)\n    - [CI/CD integration: Release and changelog generation](#cicd-integration-release-and-changelog-generation)\n    - [CI/CD integration: Pipeline report summaries](#cicd-integration-pipeline-report-summaries)\n- [Development tips](#development-tips)\n    - [Advanced custom configuration](#advanced-custom-configuration)\n    - [CI/CD code linting for different Python versions](#cicd-code-linting-for-different-python-versions)\n- [Optimize code and performance](#optimize-code-and-performance)\n    - [Lazy objects](#lazy-objects)\n    - [Object-oriented programming](#object-oriented-programming)\n- [More use cases](#more-use-cases)\n- [Conclusion](#conclusion)\n\n## Getting started\n\nThe python-gitlab documentation is a great resource for [getting started guides](https://python-gitlab.readthedocs.io/en/stable/api-usage.html),
object types and their available methods, and combined workflow examples. Together with the [GitLab API resources documentation](https://docs.gitlab.com/ee/api/api_resources.html), which provides the object attributes that can be used, these are the best resources to get going.\n\nThe code examples in this blog post require Python 3.8+, and the `python-gitlab` library. Additional requirements are specified in the `requirements.txt` file – one example requires `pyyaml` for YAML config parsing. To follow along and practice the use case code, it is recommended to clone the project, install the requirements and run the scripts. Example with Homebrew on macOS:\n\n```shell\ngit clone https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python.git\n\ncd gitlab-api-python\n\nbrew install python\n\npip3 install -r requirements.txt\n\npython3 \u003Cscriptname>.py\n```\n\nThe scripts intentionally do not use a common shared library that would provide generic functions for parameter parsing or other helper functionality. The idea is to show easy-to-follow examples that can be used stand-alone for testing, and only require installing the `python-gitlab` library as a dependency. Improving the code for production use is recommended. This can also help with building a maintained API tooling project that, for example, includes container images and CI/CD templates for developers to consume on a DevSecOps platform.\n\n## Configuration\n\nWithout configuration, python-gitlab will run unauthenticated requests against the default server `https://gitlab.com`. The most common configuration settings relate to the GitLab instance to connect to, and the authentication method by specifying access tokens. Python-gitlab supports different types of configuration: a configuration file or environment variables.\n\nThe [configuration file](https://python-gitlab.readthedocs.io/en/stable/cli-usage.html#cli-configuration) is available for the API library bindings, and the CLI (the CLI is not explained in this blog post). The configuration file supports [credential helpers](https://python-gitlab.readthedocs.io/en/stable/cli-usage.html#credential-helpers) to access tokens directly.\n\nEnvironment variables as an alternative configuration method provide an easy way to run the script on the terminal, integrate it into container images, and prepare it for running in CI/CD pipelines.\n\nThe configuration needs to be loaded into the Python script context. Start by importing the `os` library to fetch environment variables using the `os.environ.get()` method. The first parameter specifies the key; the second parameter sets the default value when the variable is not available in the environment.\n\n```python\nimport os\n\ngl_server = os.environ.get('GL_SERVER', 'https://gitlab.com')\n\nprint(gl_server)\n```\n\nThe parametrization on the terminal can happen directly for the command only, or be exported into the shell environment.\n\n```shell\n$ GL_SERVER='https://gitlab.company.com' python3 script.py\n\n$ export GL_SERVER='https://gitlab.company.com'\n$ python3 script.py\n```
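\n\nIf you prefer the configuration file over environment variables, the GitLab connection object can also be created directly from it. A minimal sketch, assuming a configuration file with a `[gitlab.com]` section (the file path below is an example):\n\n```python\nimport gitlab\n\n# Instantiate the GitLab object from a named section of a python-gitlab\n# configuration file instead of environment variables\ngl = gitlab.Gitlab.from_config('gitlab.com', ['./python-gitlab.cfg'])\n```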
\n\nIt is recommended to add safety checks to ensure that all variables are set before continuing to run the program. The following snippet imports the required libraries, reads the `GL_SERVER` environment variable and expects the user to set the `GL_TOKEN` variable. If it is not set, the script prints an error message and calls `sys.exit(1)` to indicate an error status.\n\n```python\nimport gitlab\nimport os\nimport sys\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\nGITLAB_TOKEN = os.environ.get('GL_TOKEN')\n\nif not GITLAB_TOKEN:\n    print(\"Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n```\n\nLet's now look at a more detailed example that creates a connection to the API and makes an actual data request.\n\n## Managing objects: The GitLab object\n\nAny interaction with the API requires the GitLab object to be instantiated. This is the entry point for configuring which GitLab server to connect to, how to authenticate using access tokens, and global settings such as pagination and object loading.\n\nThe following example runs an unauthenticated request against GitLab.com. It is possible to access public API endpoints and, for example, get a specific [.gitignore template for Python](https://python-gitlab.readthedocs.io/en/stable/gl_objects/templates.html#gitignore-templates).\n\n[python_gitlab_object_unauthenticated.py](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_object_unauthenticated.py)\n\n```python\nimport gitlab\n\ngl = gitlab.Gitlab()\n\n# Get .gitignore templates without authentication\ngitignore_templates = gl.gitignores.get('Python')\n\nprint(gitignore_templates.content)\n```\n\nThe next sections provide more insights into:\n\n- [Objects managers and loading](#objects-managers-and-loading)\n- [Pagination of results](#pagination-of-results)\n- [Working with object relationships](#working-with-object-relationships)\n- [Working with different object collection scopes](#working-with-different-object-collection-scopes)\n\n### Objects managers and loading\n\nThe python-gitlab library provides access to GitLab resources using so-called “[managers](https://python-gitlab.readthedocs.io/en/stable/api-usage.html#managers)”. Each manager type implements methods to work with the datasets (list, get, etc.).\n\nThe script shows how to access subgroups, direct projects, all projects including subgroups, issues, epics and todos. These methods and API endpoints require authentication to access all attributes.
The code snippet, therefore, uses variables to get the authentication token, and also uses the `GROUP_ID` variable to specify a main group at which to start searching.\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\n# https://gitlab.com/gitlab-de/use-cases/\nGROUP_ID = os.environ.get('GL_GROUP_ID', 16058698)\nGITLAB_TOKEN = os.environ.get('GL_TOKEN')\n\nif not GITLAB_TOKEN:\n    print(\"Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n# Main\nmain_group = gl.groups.get(GROUP_ID)\n\nprint(\"Sub groups\")\nfor sg in main_group.subgroups.list():\n    print(\"Subgroup name: {sg}\".format(sg=sg.name))\n\nprint(\"Projects (direct)\")\nfor p in main_group.projects.list():\n    print(\"Project name: {p}\".format(p=p.name))\n\nprint(\"Projects (including subgroups)\")\nfor p in main_group.projects.list(include_subgroups=True, all=True):\n    print(\"Project name: {p}\".format(p=p.name))\n\nprint(\"Issues\")\nfor i in main_group.issues.list(state='opened'):\n    print(\"Issue title: {t}\".format(t=i.title))\n\nprint(\"Epics\")\nfor e in main_group.epics.list():\n    print(\"Epic title: {t}\".format(t=e.title))\n\nprint(\"Todos\")\nfor t in gl.todos.list(state='pending'):\n    print(\"Todo: {t} url: {u}\".format(t=t.body, u=t.target_url))\n```\n\nYou can run the script [`python_gitlab_object_manager_methods.py`](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_object_manager_methods.py) by overriding the `GROUP_ID` variable on GitLab.com SaaS for your own group to analyze. The `GL_SERVER` variable needs to be specified for self-managed instance targets. `GL_TOKEN` must provide the personal access token.\n\n```shell\nexport GL_TOKEN=xxx\n\n# Self-managed GitLab instance\nexport GL_SERVER=\"https://gitlab.company.com\"\n# GitLab.com SaaS\nexport GL_SERVER=\"https://gitlab.com\"\n\nexport GL_GROUP_ID=1234\n\npython3 python_gitlab_object_manager_methods.py\n```\n\nGoing forward, the example snippets won’t show the Python headers and environment variable parsing to focus on the algorithm and functionality. All scripts are open source under the MIT license and available in [this project](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python).\n\n### Pagination of results\n\nBy default, the GitLab API does not return all result sets and requires clients to use [pagination](https://docs.gitlab.com/ee/api/rest/index.html#pagination) to iterate through all result pages. The python-gitlab library [allows users to specify the settings](https://python-gitlab.readthedocs.io/en/stable/api-usage.html#pagination) globally in the GitLab object, or on each `list()` call. Fetching all result sets up front fires many API requests, which can slow down the script execution. The recommended way is to use `iterator=True`, which returns a generator object; API calls are then fired on demand when the object is accessed.\n\nThe following example searches for the group name `everyonecancontribute`, and uses keyset pagination with 100 results on each page. The iterator is set to true on `gl.groups.list(iterator=True)` to fetch new result sets on demand.
If the searched group name is found, the loop breaks and prints a summary, including the measured duration of the complete search request.\n\n```python\nSEARCH_GROUP_NAME = \"everyonecancontribute\"\n\n# Use keyset pagination\n# https://python-gitlab.readthedocs.io/en/stable/api-usage.html#pagination\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN,\n    pagination=\"keyset\", order_by=\"id\", per_page=100)\n\n# Iterate over the list, and fire new API calls in case the result set does not match yet\ngroups = gl.groups.list(iterator=True)\n\nfound_page = 0\n# timer() is timeit.default_timer (header imports are omitted, see above)\nstart = timer()\n\nfor group in groups:\n    if SEARCH_GROUP_NAME == group.name:\n        # print(group) # debug\n        found_page = groups.current_page\n        break\n\nend = timer()\n\nduration = f'{end-start:.2f}'\n\nif found_page > 0:\n    print(\"Pagination API example for Python with GitLab{desc} - found group {g} on page {p}, duration {d}s\".format(\n        desc=\", the DevSecOps platform\", g=SEARCH_GROUP_NAME, p=found_page, d=duration))\nelse:\n    print(\"Could not find group name '{g}', duration {d}\".format(g=SEARCH_GROUP_NAME, d=duration))\n```\n\nExecuting `python_gitlab_pagination.py` found the [everyonecancontribute group](https://gitlab.com/everyonecancontribute) on page 5.\n\n```shell\n$ python3 python_gitlab_pagination.py\nPagination API example for Python with GitLab, the DevSecOps platform - found group everyonecancontribute on page 5, duration 8.51s\n```\n\n### Working with object relationships\n\nWhen working with object relationships – for example, collecting all projects in a given group – additional steps need to be taken. The returned project objects provide limited attributes by default. Manageable objects require an additional `get()` call, which requests the full project object from the API in the background. This on-demand workflow helps to avoid waiting times and traffic by reducing the immediately returned attributes.\n\nThe following example illustrates the problem: It loops through all projects in a group and tries to call the `project.branches.list()` function, which raises an exception that is caught in the try/except flow. The second example retrieves a full manageable project object and tries the function call again.\n\n```python\n# Main\ngroup = gl.groups.get(GROUP_ID)\n\n# Collect all projects in group and subgroups\nprojects = group.projects.list(include_subgroups=True, all=True)\n\nfor project in projects:\n    # Try running a method on a weak object\n    try:\n        print(\"🤔 Project: {pn} 💡 Branches: {b}\\n\".format(\n            pn=project.name,\n            b=\", \".join([x.name for x in project.branches.list()])))\n    except Exception as e:\n        print(\"Got exception: {e} \\n ===================================== \\n\".format(e=e))\n\n    # Retrieve a full manageable project object\n    # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n    manageable_project = gl.projects.get(project.id)\n\n    # Print a method available on a manageable object\n    print(\"🤔 Project: {pn} 💡 Branches: {b}\\n\".format(\n        pn=manageable_project.name,\n        b=\", \".join([x.name for x in manageable_project.branches.list()])))\n```\n\nThe exception handler in the python-gitlab library prints the error message, and also links to the documentation.
As a debugging note: Whenever you cannot access object attributes or function calls, the object might not be a full manageable object.\n\n```shell\n$ python3 python_gitlab_manageable_objects.py\n\n🤔 Project: GitLab API Playground 💡 Branches: cicd-demo-automated-comments, docs-mr-approval-settings, main\n\nGot exception: 'GroupProject' object has no attribute 'branches'\n\n\u003Cclass 'gitlab.v4.objects.projects.GroupProject'> was created via a\nlist() call and only a subset of the data may be present. To ensure\nall data is present get the object using a get(object.id) call. For\nmore details, see:\n\nhttps://python-gitlab.readthedocs.io/en/v3.8.1/faq.html#attribute-error-list\n =====================================\n```\n\nThe full script is located [here](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_manageable_objects.py).\n\n### Working with different object collection scopes\n\nSometimes, the script needs to collect all projects from a self-managed instance, or from a group with subgroups, or from a single project. The single-project scope is helpful for fast testing of the required attributes, and the group scope helps with testing at scale later. The following snippet collects all project objects into the `projects` list, appending objects depending on the incoming configuration. You will also see the manageable object pattern for projects in groups again.\n\n```python\n    # Collect all projects, or prefer projects from a group id, or a project id\n    projects = []\n\n    # Direct project ID\n    if PROJECT_ID:\n        projects.append(gl.projects.get(PROJECT_ID))\n\n    # Groups and projects inside\n    elif GROUP_ID:\n        group = gl.groups.get(GROUP_ID)\n\n        for project in group.projects.list(include_subgroups=True, all=True):\n            # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n            manageable_project = gl.projects.get(project.id)\n            projects.append(manageable_project)\n\n    # All projects on the instance (may take a while to process)\n    else:\n        projects = gl.projects.list(get_all=True)\n```\n\nThe full example is located in [this script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_mr_approval_rules.py) for listing MR approval rules settings for specified project targets.\n\n## DevSecOps use cases for API read actions\n\nThe authenticated access token needs the [`read_api` scope](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#personal-access-token-scopes).\n\nThe following use cases are discussed:\n\n- [List branches by merged state](#list-branches-by-merged-state)\n- [Print project settings for review: MR approval rules](#print-project-settings-for-review-mr-approval-rules)\n- [Inventory: Get all CI/CD variables that are protected or masked](#inventory-get-all-cicd-variables-that-are-protected-or-masked)\n- [Download a file from the repository](#download-a-file-from-the-repository)\n- [Migration help: List all certificate-based Kubernetes clusters](#migration-help-list-all-certificate-based-kubernetes-clusters)\n- [Team efficiency: Check if existing merge requests need to be rebased after merging a huge refactoring MR](#team-efficiency-check-if-existing-merge-requests-need-to-be-rebased-after-merging-a-huge-refactoring-mr)\n\n### List branches by merged state\n\nA common ask is to do some Git housekeeping in the project, and see how many merged and unmerged branches are floating around.
[A question on the GitLab community forum](https://forum.gitlab.com/t/python-gitlab-project-branch-list-filter/80257) about filtering branch listings inspired me to look into writing a [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_branches_by_state.py) that helps achieve this goal. The `branches.list()` method returns all branch objects, which are stored in a temporary list and then processed in two loops: one collecting merged branch names, the other collecting unmerged branch names. The `merged` attribute on the `branch` object is a boolean value indicating whether the branch has been merged.\n\n```python\nproject = gl.projects.get(PROJECT_ID, lazy=False, pagination=\"keyset\", order_by=\"updated_at\", per_page=100)\n\n# Get all branches\nreal_branches = []\nfor branch in project.branches.list():\n    real_branches.append(branch)\n\nprint(\"All branches\")\nfor rb in real_branches:\n    print(\"Branch: {b}\".format(b=rb.name))\n\n# Get all merged branches\nmerged_branches_names = []\nfor branch in real_branches:\n    if branch.default:\n        continue # ignore the default branch for merge status\n\n    if branch.merged:\n        merged_branches_names.append(branch.name)\n\nprint(\"Branches merged: {b}\".format(b=\", \".join(merged_branches_names)))\n\n# Get un-merged branches\nnot_merged_branches_names = []\nfor branch in real_branches:\n    if branch.default:\n        continue # ignore the default branch for merge status\n\n    if not branch.merged:\n        not_merged_branches_names.append(branch.name)\n\nprint(\"Branches not merged: {b}\".format(b=\", \".join(not_merged_branches_names)))\n```\n\nThe workflow is intentionally a step-by-step read; as an exercise, you can practice optimizing the Python code for the conditional branch name collection.\n\n### Print project settings for review: MR approval rules\n\nThe following [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_mr_approval_rules.py) walks through all collected project objects, and checks whether approval rules are specified. If the list length is greater than zero, it loops over the list and prints the settings using a JSON pretty-print method.\n\n```python\n    # Loop over projects and print the settings\n    # https://python-gitlab.readthedocs.io/en/stable/gl_objects/merge_request_approvals.html\n    for project in projects:\n        if len(project.approvalrules.list()) > 0:\n            #print(project) #debug\n            print(\"# Project: {name}, ID: {id}\\n\\n\".format(name=project.name_with_namespace, id=project.id))\n            print(\"[MR Approval settings]({url}/-/settings/merge_requests)\\n\\n\".format(url=project.web_url))\n\n            for ar in project.approvalrules.list():\n                print(\"## Approval rule: {name}, ID: {id}\".format(name=ar.name, id=ar.id))\n                print(\"\\n```json\\n\")\n                print(json.dumps(ar.attributes, indent=2)) # TODO: can be more beautiful, but serves its purpose with pretty print JSON\n                print(\"\\n```\\n\")\n\n```\n\n### Inventory: Get all CI/CD variables that are protected or masked\n\n[CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) are helpful for pipeline parameterization, and can be configured globally on the instance, in groups and in projects. Secrets, passwords and otherwise sensitive information could be stored there, too.
Sometimes it can be necessary to get an overview of all CI/CD variables that are either protected or masked – for example, to get a sense of how many variables need to be updated when rotating tokens.\n\nThe following [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_all_cicd_variables_masked_or_protected.py) gets all groups and projects and tries to collect the CI/CD variables from the global instance (requires admin permissions), groups and projects (requires maintainer/owner permissions). It prints all CI/CD variables that are either protected or masked, adding that a potential secret value is stored.\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\n\n# Helper function to evaluate secrets and print the variables\ndef eval_print_var(var):\n    if var.protected or var.masked:\n        print(\"🛡️🛡️🛡️ Potential secret: Variable '{name}', protected {p}, masked: {m}\".format(name=var.key, p=var.protected, m=var.masked))\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\nGITLAB_TOKEN = os.environ.get('GL_TOKEN') # token requires maintainer+ permissions. Instance variables require admin access.\nPROJECT_ID = os.environ.get('GL_PROJECT_ID') # optional\nGROUP_ID = os.environ.get('GL_GROUP_ID', 8034603) # https://gitlab.com/everyonecancontribute\n\nif not GITLAB_TOKEN:\n    print(\"🤔 Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n# Collect all projects, or prefer projects from a group id, or a project id\nprojects = []\n# Collect all groups, or prefer group from a group id\ngroups = []\n\n# Direct project ID\nif PROJECT_ID:\n    projects.append(gl.projects.get(PROJECT_ID))\n\n# Groups and projects inside\nelif GROUP_ID:\n    group = gl.groups.get(GROUP_ID)\n\n    for project in group.projects.list(include_subgroups=True, all=True):\n        # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n        manageable_project = gl.projects.get(project.id)\n        projects.append(manageable_project)\n\n    groups.append(group)\n\n# All projects/groups on the instance (may take a while to process, use iterators to fetch on-demand).\nelse:\n    projects = gl.projects.list(iterator=True)\n    groups = gl.groups.list(iterator=True)\n\nprint(\"# List of all CI/CD variables marked as secret (instance, groups, projects)\")\n\n# https://python-gitlab.readthedocs.io/en/stable/gl_objects/variables.html\n\n# Instance variables (if the token has permissions)\nprint(\"Instance variables, if accessible\")\ntry:\n    for i_var in gl.variables.list(iterator=True):\n        eval_print_var(i_var)\nexcept Exception:\n    print(\"No permission to fetch global instance variables, continuing without.\")\n    print(\"\\n\")\n\n# Group variables (maintainer permissions for groups required)\nfor group in groups:\n    print(\"Group {n}, URL: {u}\".format(n=group.full_path, u=group.web_url))\n    for g_var in group.variables.list(iterator=True):\n        eval_print_var(g_var)\n\n    print(\"\\n\")\n\n# Loop over projects and print the settings\nfor project in projects:\n    # Skip archived projects, they throw 403 errors\n    if project.archived:\n        continue\n\n    print(\"Project {n}, URL: {u}\".format(n=project.path_with_namespace, u=project.web_url))\n    for p_var in project.variables.list(iterator=True):\n        eval_print_var(p_var)\n\n    print(\"\\n\")\n```\n\nThe script intentionally does not print the variable values; this is left as an exercise for safe environments.
The recommended way of storing secrets is to [use external providers](https://docs.gitlab.com/ee/ci/secrets/).\n\n### Download a file from the repository\n\nThe goal of this [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_raw_file_content.py) is to download a file from a specified branch and store its content in a new file.\n\n```python\n# Goal: Try to download README.md from https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/README.md\nFILE_NAME = 'README.md'\nBRANCH_NAME = 'main'\n\n# Search the file in the repository tree and get the raw blob\nfor f in project.repository_tree():\n    print(\"File path '{name}' with id '{id}'\".format(name=f['name'], id=f['id']))\n\n    if f['name'] == FILE_NAME:\n        f_content = project.repository_raw_blob(f['id'])\n        print(f_content)\n\n# Alternative approach: Get the raw file from the main branch\nraw_content = project.files.raw(file_path=FILE_NAME, ref=BRANCH_NAME)\nprint(raw_content)\n\n# Store the file on disk\nwith open('raw_README.md', 'wb') as f:\n    project.files.raw(file_path=FILE_NAME, ref=BRANCH_NAME, streamed=True, action=f.write)\n```\n\n### Migration help: List all certificate-based Kubernetes clusters\n\nThe certificate-based integration of Kubernetes clusters into GitLab [was deprecated](https://docs.gitlab.com/ee/update/deprecations.html#self-managed-certificate-based-integration-with-kubernetes). To help with migration plans, the inventory of existing groups and projects can be automated using the GitLab API.\n\n```python\ngroups = []\n\n# Get the GROUP_ID group\ngroups.append(gl.groups.get(GROUP_ID))\n\n# Appending to the list while iterating also walks the newly added subgroups\nfor group in groups:\n    for sg in group.subgroups.list(include_subgroups=True, all=True):\n        real_group = gl.groups.get(sg.id)\n        groups.append(real_group)\n\ngroup_clusters = {}\nproject_clusters = {}\n\nfor group in groups:\n    # Collect group clusters\n    g_clusters = group.clusters.list()\n\n    if len(g_clusters) > 0:\n        group_clusters[group.id] = g_clusters\n\n    # Collect all projects in group and subgroups and their clusters\n    projects = group.projects.list(include_subgroups=True, all=True)\n\n    for project in projects:\n        # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n        manageable_project = gl.projects.get(project.id)\n\n        # Skip archived projects\n        if project.archived:\n            continue\n\n        p_clusters = manageable_project.clusters.list()\n\n        if len(p_clusters) > 0:\n            project_clusters[project.id] = p_clusters\n\n# Print summary. print_clusters() is a helper defined in the full script.\nprint(\"## Group clusters\\n\\n\")\nfor g_id, g_clusters in group_clusters.items():\n    url = gl.groups.get(g_id).web_url\n    print(\"Group ID {g_id}: {u}\\n\\n\".format(g_id=g_id, u=url))\n    print_clusters(g_clusters)\n\nprint(\"## Project clusters\\n\\n\")\nfor p_id, p_clusters in project_clusters.items():\n    url = gl.projects.get(p_id).web_url\n    print(\"Project ID {p_id}: {u}\\n\\n\".format(p_id=p_id, u=url))\n    print_clusters(p_clusters)\n```\n\nThe full script is available [here](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/list_cert_based_kubernetes_clusters.py).\n\n### Team efficiency: Check if existing merge requests need to be rebased after merging a huge refactoring MR\n\nThe [GitLab handbook](/handbook/) repository is a large monorepo with many merge requests created, reviewed, approved and merged.
Some reviews take longer than others, and some merge requests touch multiple pages when renaming a string, or [all handbook pages](/handbook/about/#count-handbook-pages). The marketing handbook needed restructuring (think of code refactoring), and as such, many directories and paths were moved or renamed. [The issue tasks](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13991#tasks) grew over time, and I was worried that other merge requests would run into conflicts after merging the huge changes. I remembered that the python-gitlab library can fetch all merge requests in a given project, including details on the Git branch, source paths changed and much more.\n\nThe resulting script configures a list of source paths to watch, fetches every open merge request diff with `mr.diffs.list()`, and checks whether a pattern matches the value in `old_path`. If a match is found, the script logs it, and saves the merge request in the `seen_mr` dictionary for the summary later. There are additional attributes collected to allow printing a Markdown task list with URLs for easier copy-paste into [issue descriptions](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13991#additional-tasks). The full script is located [here](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/search_mr_contains_updated_path.py).\n\n```python\nPATH_PATTERNS = [\n    'path/to/handbook/source/page.md',\n]\n\n# Only list opened MRs\n# https://python-gitlab.readthedocs.io/en/stable/gl_objects/merge_requests.html#project-merge-requests\nmrs = project.mergerequests.list(state='opened', iterator=True)\n\nseen_mr = {}\n\nfor mr in mrs:\n    # https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs\n    real_mr = project.mergerequests.get(mr.get_id())\n    real_mr_id = real_mr.attributes['iid']\n    real_mr_url = real_mr.attributes['web_url']\n    mr_status = real_mr.attributes['detailed_merge_status']\n\n    for diff in real_mr.diffs.list(iterator=True):\n        real_diff = real_mr.diffs.get(diff.id)\n\n        for d in real_diff.attributes['diffs']:\n            for p in PATH_PATTERNS:\n                if p in d['old_path']:\n                    print(\"MATCH: {p} in MR {mr_id}, status '{s}', title '{t}' - URL: {mr_url}\".format(\n                        p=p,\n                        mr_id=real_mr_id,\n                        s=mr_status,\n                        t=real_mr.attributes['title'],\n                        mr_url=real_mr_url))\n\n                    if real_mr_id not in seen_mr:\n                        seen_mr[real_mr_id] = real_mr\n\nprint(\"\\n# MRs to update\\n\")\n\nfor id, real_mr in seen_mr.items():\n    print(\"- [ ] !{mr_id} - {mr_url}+ Status: {s}, Title: {t}\".format(\n        mr_id=id,\n        mr_url=real_mr.attributes['web_url'],\n        s=real_mr.attributes['detailed_merge_status'],\n        t=real_mr.attributes['title']))\n```\n\n## DevSecOps use cases for API write actions\n\nThe authenticated access token needs the full [`api` scope](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#personal-access-token-scopes).\n\nThe following use cases are discussed:\n\n- [Move epics between groups](#move-epics-between-groups)\n- [Compliance: Ensure that project settings are not overridden](#compliance-ensure-that-project-settings-are-not-overridden)\n- [Taking notes, generate due date overview](#taking-notes-generate-due-date-overview)\n- [Create issue index in a Markdown file, grouped by labels](#create-issue-index-in-a-markdown-file-grouped-by-labels)\n\n### Move epics between groups
\n\nSometimes it is necessary to move epics, similar to issues, into a different group. A question in the GitLab marketing Slack channel inspired me to look into a [feature proposal for the UI](https://gitlab.com/gitlab-org/gitlab/-/issues/12689), [quick actions](/blog/improve-your-gitlab-productivity-with-these-10-tips/), and, later, into writing an API script to automate the steps. The idea is simple: Move an epic from a source group to a target group, and copy its title, description and labels. Since epics group issues, the assigned issues need to be reassigned to the target epic, too. Parent-child epic relationships need to be taken into account, too: All child epics of the source epic need to be reassigned to the target epic.\n\nThe following script looks up all source [epic attributes](https://python-gitlab.readthedocs.io/en/stable/gl_objects/epics.html) first, and then creates a new target epic with minimal attributes: title and description. The labels list is copied and the changes are persisted with the `save()` call. The issues assigned to the epic need to be re-created in the target epic. The `create()` call actually creates the relationship item, not a new issue object itself. Moving the child epics requires a different approach, since the relationship points in the other direction: The `parent_id` on the child epic needs to be compared against the source epic ID and, if matching, updated to the target epic ID. After copying everything successfully, the source epic needs to be changed into the `closed` state.\n\n```python\n#!/usr/bin/env python\n\n# Description: Show how epics can be moved between groups, including title, description, labels, child epics and issues.\n# Requirements: python-gitlab Python libraries. GitLab API write access, and maintainer access to all configured groups/projects.\n# Author: Michael Friedrich \u003Cmfriedrich@gitlab.com>\n# License: MIT, (c) 2023-present GitLab B.V.\n\nimport gitlab\nimport os\nimport sys\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\n# https://gitlab.com/gitlab-de/use-cases/gitlab-api\nSOURCE_GROUP_ID = os.environ.get('GL_SOURCE_GROUP_ID', 62378643)\n# https://gitlab.com/gitlab-de/use-cases/gitlab-api/epic-move-target\nTARGET_GROUP_ID = os.environ.get('GL_TARGET_GROUP_ID', 62742177)\n# https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/-/epics/1\nEPIC_ID = os.environ.get('GL_EPIC_ID', 1)\nGITLAB_TOKEN = os.environ.get('GL_TOKEN')\n\nif not GITLAB_TOKEN:\n    print(\"Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n# Main\n# Goal: Move epic to target group, including title, body, labels, and child epics and issues.\nsource_group = gl.groups.get(SOURCE_GROUP_ID)\ntarget_group = gl.groups.get(TARGET_GROUP_ID)\n\n# Create a new target epic and copy all its items, then close the source epic.\nsource_epic = source_group.epics.get(EPIC_ID)\n# print(source_epic) #debug\n\nepic_title = source_epic.title\nepic_description = source_epic.description\nepic_labels = source_epic.labels\nepic_issues = source_epic.issues.list()\n\n# Create the epic with minimal attributes\ntarget_epic = target_group.epics.create({\n    'title': epic_title,\n    'description': epic_description,\n})\n\n# Assign the list\ntarget_epic.labels = epic_labels\n\n# Persist the changes in the new epic\ntarget_epic.save()\n\n# Epic issues need to be re-assigned in a loop\nfor epic_issue in epic_issues:\n    ei = target_epic.issues.create({'issue_id': epic_issue.id})
epic_issue.id})\n\n# Child epics need to update their parent_id to the new epic\n# Need to search in all epics, use lazy object loading\nfor sge in source_group.epics.list(lazy=True):\n    # this epic has the source epic as parent epic?\n    if sge.parent_id == source_epic.id:\n        # Update the parent id\n        sge.parent_id = target_epic.id\n        sge.save()\n\nprint(\"Copied source epic {source_id} ({source_url}) to target epic {target_id} ({target_url})\".format(\n    source_id=source_epic.id, source_url=source_epic.web_url,\n    target_id=target_epic.id, target_url=target_epic.web_url))\n\n# Close the old epic\nsource_epic.state_event = 'close'\nsource_epic.save()\nprint(\"Closed source epic {source_id} ({source_url})\".format(\n    source_id=source_epic.id, source_url=source_epic.web_url))\n\n```\n\n\n```shell\n$ python3 move_epic_between_groups.py\nCopied source epic 725341 (https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/-/epics/1) to target epic 725358 (https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/epic-move-target/-/epics/6)\nClosed source epic 725341 (https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/-/epics/1)\n```\n\n\nThe [target epic](https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/epic-move-target/-/epics/5) was created and shows the expected result: Same title, description, labels, child epic, and issues.\n\n![Target epic which has all attributes copied from the source epic: title, description, labels, child epics, issues](/images/blogimages/efficient-devsecops-workflows-python-gitlab-handson/python_gitlab_moved_epic_with_all_attributes.png){: .shadow}\n\n**Exercise**: The script does not copy [comments](https://python-gitlab.readthedocs.io/en/stable/gl_objects/notes.html) and [discussion threads](https://python-gitlab.readthedocs.io/en/stable/gl_objects/discussions.html) yet. Research and help update the script – merge requests welcome!\n\n\n### Compliance: Ensure that project settings are not overridden\n\nProject and group settings may be changed accidentally by team members with maintainer permissions, while compliance requirements still need to be met. Another use case is managing configuration with Infrastructure as Code tools, ensuring that GitLab instance, group, and project configuration is persisted and always the same. Tools like Ansible or Terraform can invoke an API script, or use the python-gitlab library directly, to manage these settings.\n\nThe following example only has the `main` branch protected.\n\n![GitLab project settings for repositories and protected branches, main branch](/images/blogimages/efficient-devsecops-workflows-python-gitlab-handson/python_gitlab_protected_branches_settings_main.png){: .shadow}\n\nLet us assume that a new `production` branch has been added and should be protected, too. 
The following [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/enforce_protected_branches.py) defines a dictionary of protected branches, setting the access levels for push/merge permissions to the maintainer level, and builds the comparison logic around the [python-gitlab protected branches documentation](https://python-gitlab.readthedocs.io/en/stable/gl_objects/protected_branches.html).\n\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\n# https://gitlab.com/gitlab-de/use-cases/\nGROUP_ID = os.environ.get('GL_GROUP_ID', 16058698)\nGITLAB_TOKEN = os.environ.get('GL_TOKEN')\n\nPROTECTED_BRANCHES = {\n    'main': {\n        'merge_access_level': gitlab.const.AccessLevel.MAINTAINER,\n        'push_access_level': gitlab.const.AccessLevel.MAINTAINER\n    },\n    'production': {\n        'merge_access_level': gitlab.const.AccessLevel.MAINTAINER,\n        'push_access_level': gitlab.const.AccessLevel.MAINTAINER\n    },\n}\n\nif not GITLAB_TOKEN:\n    print(\"Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n# Main\ngroup = gl.groups.get(GROUP_ID)\n\n# Collect all projects in group and subgroups\nprojects = group.projects.list(include_subgroups=True, all=True)\n\nfor project in projects:\n    # Retrieve a full manageable project object\n    # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n    manageable_project = gl.projects.get(project.id)\n\n    # https://python-gitlab.readthedocs.io/en/stable/gl_objects/protected_branches.html\n    protected_branch_names = []\n\n    for pb in manageable_project.protectedbranches.list():\n        manageable_protected_branch = manageable_project.protectedbranches.get(pb.name)\n        print(\"Protected branch name: {n}, merge_access_level: {mal}, push_access_level: {pal}\".format(\n            n=manageable_protected_branch.name,\n            mal=manageable_protected_branch.merge_access_levels,\n            pal=manageable_protected_branch.push_access_levels\n        ))\n\n        protected_branch_names.append(manageable_protected_branch.name)\n\n    for branch_to_protect, levels in PROTECTED_BRANCHES.items():\n        # Fix missing protected branches, using the configured access levels\n        if branch_to_protect not in protected_branch_names:\n            print(\"Adding branch {n} to protected branches settings\".format(n=branch_to_protect))\n            p_branch = manageable_project.protectedbranches.create({\n                'name': branch_to_protect,\n                'merge_access_level': levels['merge_access_level'],\n                'push_access_level': levels['push_access_level']\n            })\n```\n\nRunning the script prints the existing `main` branch, and a note that `production` will be added. The screenshot from the repository settings proves this action.\n\n```shell\n$ python3 enforce_protected_branches.py\nProtected branch name: main, merge_access_level: [{'id': 67294702, 'access_level': 40, 'access_level_description': 'Maintainers', 'user_id': None, 'group_id': None}], push_access_level: [{'id': 68546039, 'access_level': 40, 'access_level_description': 'Maintainers', 'user_id': None, 'group_id': None}]\nAdding branch production to protected branches settings\n```\n\n![GitLab project settings for repositories and protected branches, main and production branch](/images/blogimages/efficient-devsecops-workflows-python-gitlab-handson/python_gitlab_protected_branches_settings_main_production.png){: .shadow}\n\n
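**Exercise**: The script only adds missing protected branches; it does not yet reset access levels that were changed manually. A minimal, hypothetical sketch (untested, reusing the `PROTECTED_BRANCHES` dictionary and the `manageable_project` object from above) could delete and re-create a drifted branch protection:\n\n```python\n# Hypothetical extension: detect and reset drifted access levels.\n# Deleting and re-creating the protection also works on GitLab versions\n# where protected branches cannot be updated in place.\nfor pb in manageable_project.protectedbranches.list():\n    if pb.name not in PROTECTED_BRANCHES:\n        continue\n\n    desired = PROTECTED_BRANCHES[pb.name]\n    current_merge = [level['access_level'] for level in pb.merge_access_levels]\n    current_push = [level['access_level'] for level in pb.push_access_levels]\n\n    if current_merge != [desired['merge_access_level']] or current_push != [desired['push_access_level']]:\n        print(\"Resetting drifted protection for branch {n}\".format(n=pb.name))\n        manageable_project.protectedbranches.delete(pb.name)\n        manageable_project.protectedbranches.create({\n            'name': pb.name,\n            'merge_access_level': desired['merge_access_level'],\n            'push_access_level': desired['push_access_level']\n        })\n```\n\n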
### Taking notes, generate due date overview\n\nA [Hacker News discussion about note-taking tools](https://news.ycombinator.com/item?id=32155848) inspired me to look into creating a Markdown table overview, generated from note-taking files and sorted by the parsed due date. The script is located [here](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/generate_snippets_index_by_due_date.py) and is more complex to understand. A note file follows this format:\n\n```\n# 2022-07-19 Notes\n\nHN topic about taking notes: https://news.ycombinator.com/item?id=32152935\n\n\u003C!--\n---\nTags: DevOps, Learn\nDue: 2022-08-01\n---\n-->\n\n```\n\n
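A minimal, hypothetical sketch of just the parsing and sorting idea (not the linked script itself) could look like this:\n\n```python\nimport re\nfrom datetime import date\n\n# Sketch: parse the 'Due' front matter date from a note's raw content.\ndef parse_due_date(content):\n    match = re.search(r'^Due:\\s*(\\d{4}-\\d{2}-\\d{2})\\s*$', content, re.MULTILINE)\n    return date.fromisoformat(match.group(1)) if match else None\n\n# Hypothetical input: note file names mapped to their raw Markdown content.\nnotes = {\n    '2022-07-19-notes.md': 'Tags: DevOps, Learn\\nDue: 2022-08-01',\n    '2022-07-20-notes.md': 'Tags: Security\\nDue: 2022-07-25',\n}\n\nprint('| Due date | Note |')\nprint('|----------|------|')\n\n# Sort by the parsed due date; notes without a date go last.\nfor name, content in sorted(notes.items(), key=lambda item: parse_due_date(item[1]) or date.max):\n    print('| {due} | {name} |'.format(due=parse_due_date(content), name=name))\n```\n\n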
### Create issue index in a Markdown file, grouped by labels\n\nA similar Hacker News question inspired me to write a [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/generate_issue_index_grouped_by_label.py) that parses all issues in a GitLab project by labels, and creates or updates a Markdown index file in the same repository. The issues are grouped by label.\n\nFirst, the issues are fetched from the project, including all labels, and stored in the `index` dictionary.\n\n```python\np = gl.projects.get(PROJECT_ID)\n\nlabels = p.labels.list()\n\nindex = {}\n\nfor i in p.issues.list():\n    for l in i.labels:\n        if l not in index:\n            index[l] = []\n\n        # Use the project-relative iid for #-references in Markdown\n        index[l].append(\"#{id} - {title}\".format(id=i.iid, title=i.title))\n```\n\nThe second step is to create a Markdown formatted listing based on the collected index data, with the label name as key, holding a list of issue strings.\n\n```python\nindex_str = \"\"\"# Issue Overview\n_Grouped by issue labels._\n\"\"\"\n\nfor l_name, i_list in index.items():\n    index_str += \"\\n## {label} \\n\\n\".format(label=l_name)\n\n    for i in i_list:\n        index_str += \"- {title}\\n\".format(title=i)\n```\n\nThe last step is to create a new file in the repository, or update an existing one. This is a little tricky because the API expects you to define the action, and throws an error if you try to update a nonexistent file. The script first checks whether the file path exists in the repository, and defines the `action` attribute accordingly. The `data` dictionary gets built, with the final `commits.create()` method called.\n\n```python\n# Dump index_str to FILE_NAME\n# Create as new commit\n# See https://docs.gitlab.com/ce/api/commits.html#create-a-commit-with-multiple-files-and-actions\n# for actions detail\n\n# Check if the file exists, and define the commit action.\n# files.get() raises a GitlabGetError (404) for a missing file.\ntry:\n    p.files.get(file_path=FILE_NAME, ref=REF_NAME)\n    action = 'update'\nexcept gitlab.exceptions.GitlabGetError:\n    action = 'create'\n\ndata = {\n    'branch': REF_NAME,\n    'commit_message': 'Generate new index, {d}'.format(d=date.today()),\n    'actions': [\n        {\n            'action': action,\n            'file_path': FILE_NAME,\n            'content': index_str\n        }\n    ]\n}\n\ncommit = p.commits.create(data)\n```\n\n## Advanced DevSecOps workflows\n\n- [Container images to run API scripts](#container-images-to-run-api-scripts)\n- [CI/CD integration: Release and changelog generation](#cicd-integration-release-and-changelog-generation)\n- [CI/CD integration: Pipeline report summaries](#cicd-integration-pipeline-report-summaries)\n\n### Container images to run API scripts\n\nInstalling the Python interpreter and dependent libraries into the operating system may not always work, or it may be a barrier to using the API scripts. A container image that can be pulled from the GitLab registry is a good first step towards more DevSecOps automation and future CI/CD integrations, and provides a tested environment. The python-gitlab project [provides container images](https://python-gitlab.readthedocs.io/en/stable/index.html#using-the-docker-images) which can be used for testing.\n\nThe cloned script repository can be mounted into the container, and the settings are configured using environment variables. Example with the Docker CLI:\n\n```shell\n$ docker run -ti -v \"`pwd`:/app\" \\\n  -e \"GL_SERVER=https://gitlab.com\" \\\n  -e \"GL_TOKEN=$GITLAB_TOKEN\" \\\n  -e \"GL_GROUP_ID=16058698\" \\\nregistry.gitlab.com/python-gitlab/python-gitlab:slim-bullseye \\\npython /app/python_gitlab_manageable_objects.py\n```\n\n### CI/CD integration: Release and changelog generation\n\nCreating a Git tag and a release in GitLab often requires an attached changelog: a summary of all Git commits, all merged merge requests, or something similar that is easier to consume for everyone interested in the changes in the new release. Automating the changelog generation in CI/CD pipelines is possible using the GitLab API. 
The simplest list uses the Git commit history, as shown in the [`create_simple_changelog_from_git_history.py`](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/create_simple_changelog_from_git_history.py) script below:\n\n\n```python\nproject = gl.projects.get(PROJECT_ID)\ncommits = project.commits.list(ref_name='main', lazy=True, iterator=True)\n\nprint(\"# Changelog\")\n\nfor commit in commits:\n    # Generate a markdown formatted list with URLs\n    print(\"- [{text}]({url}) ({name})\".format(text=commit.title, url=commit.web_url, name=commit.author_name))\n```\n\nExecuting the script on the [o11y.love project](https://gitlab.com/everyonecancontribute/observability/o11y.love) will print a Markdown list with URLs.\n\n```shell\n$ python3 create_simple_changelog_from_git_history.py\n# Changelog\n- [Merge branch 'topics-ebpf-opentelemetry' into 'main'](https://gitlab.com/everyonecancontribute/observability/o11y.love/-/commit/75df97e13e0f429803dc451aac7fee080a51f44c) (Michael Friedrich)\n- [Move eBPF/OpenTelemetry into dedicated topics pages ](https://gitlab.com/everyonecancontribute/observability/o11y.love/-/commit/8fa4233630ff8c1d65aff589bd31c4c2f5df36cb) (Michael Friedrich)\n- [Merge branch 'workshop-add-k8s-o11y-toc' into 'main'](https://gitlab.com/everyonecancontribute/observability/o11y.love/-/commit/8b7949b19af6aa6bf25f73ca1ffe8616a7dbaa00) (Michael Friedrich)\n- [Add TOC for Kubesimplify Kubernetes Observability workshop ](https://gitlab.com/everyonecancontribute/observability/o11y.love/-/commit/63c8ad587f43e3926e6749a62c33ad0b6f229f47) (Michael Friedrich)\n\n...\n```\n\n**Exercise**: The script is not production-ready yet, but it should get you going to group commits by Git tag/release, filter merge commits, attach the changelog file or content to the [GitLab release details](https://docs.gitlab.com/ee/api/releases/), etc.\n\n### CI/CD integration: Pipeline report summaries\n\nWhen developing a new API script in Python, a CI/CD integration with automated runs may be desired, too. My recommendation is to focus on writing and testing the script stand-alone on the command line first and, once it works reliably, adapt the code to perform its actions in CI/CD, too. After writing a few scripts, and practicing a lot, you will have learned to write code that can be executed on the CLI, in containers, and in CI/CD jobs.\n\nA good preparation for CI/CD is to focus on environment variables to configure the script. The environment variables can be defined as CI/CD variables, so there is no extra work with additional configuration files or command line parameters. This keeps the CI/CD configuration footprint small and reusable, too.\n\nAn example integration that automatically creates security summaries as a Markdown comment in a merge request was described in the [\"Fantastic Infrastructure-as-Code security attacks and how to find them\" blog post](/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them/#integrations-into-cicd-and-merge-requests-for-review). This use case required research and testing before actually writing the full API script:\n\n1. Read the python-gitlab documentation to learn how [merge request comments (notes)](https://python-gitlab.readthedocs.io/en/stable/gl_objects/notes.html#project-notes) can be created.\n2. Create a test project and a test merge request for testing.\n3. Start writing code which instantiates the GitLab connection object, fetches the project object, and gets the merge request object from a pre-defined ID.\n4. Run `mr.notes.create({'body': 'This is a test by dnsmichi'})` (see the sketch after this list).\n5. Iterate on the body content and pre-fill a string with a Markdown table.\n6. Fetch pre-defined CI/CD variables to get the `CI_MERGE_REQUEST_IID` value, which is required to target the merge request.\n7. Verify the API permissions and learn that the CI job token is not sufficient.\n8. Implement the full algorithm, integrate CI/CD testing, and add documentation.\n\n
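As a rough illustration of step 4, a minimal, hypothetical sketch (not the exact script from the linked blog post) that posts a comment from within a merge request pipeline could look like this:\n\n```python\nimport gitlab\nimport os\n\n# CI_SERVER_URL, CI_PROJECT_ID and CI_MERGE_REQUEST_IID are pre-defined\n# CI/CD variables in merge request pipelines. GL_TOKEN is assumed to be a\n# custom CI/CD variable holding an access token with api scope.\ngl = gitlab.Gitlab(os.environ['CI_SERVER_URL'], private_token=os.environ['GL_TOKEN'])\n\nproject = gl.projects.get(os.environ['CI_PROJECT_ID'])\nmr = project.mergerequests.get(os.environ['CI_MERGE_REQUEST_IID'])\n\n# Pre-filled Markdown table as the comment body (step 5)\nsummary = \"| Check | Result |\\n|-------|--------|\\n| Example scan | passed |\"\n\n# https://python-gitlab.readthedocs.io/en/stable/gl_objects/notes.html#project-notes\nmr.notes.create({'body': summary})\n```\n\n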
The script runs in CI/CD pipelines after the security scans have completed, posting the report summary as a comment. Another use case is using [Pipeline schedules](https://docs.gitlab.com/ee/ci/pipelines/schedules.html), which provide synchronization capabilities, with the comments posted to an issue summary.\n\n## Development tips\n\nCode and abstraction libraries are helpful, but sometimes it can be hard to see why an attribute or object does not provide the expected behavior. It is helpful to take a step back and look into different ways to fetch data from the REST API, for example [using jq and curl](/blog/devops-workflows-json-format-jq-ci-cd-lint/). The [GitLab CLI](/blog/introducing-the-gitlab-cli/) can also be used to query the API and get immediate results.\n\nDeveloping scripts that interact with APIs can become a repetitive task: adding more needed attributes, learning about object relations and methods, and deciding how to store the retrieved data. Especially for larger datasets, it can be a good idea to use the JSON library to dump data structures into a file cache on disk, and provide a debug configuration option to read the data from that file instead of firing the API requests again every time. This also helps to mitigate potential rate limiting.\n\nAdding timing points to the code can help measure the performance and efficiency of the algorithm used. The following snippet [measures the duration](https://stackoverflow.com/questions/7370801/how-do-i-measure-elapsed-time-in-python) of requests to retrieve the merge request status. It is part of a script that was used to analyze a potential problem with the `detailed_merge_status` attribute in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/386661#note_1237757295).\n\n```python\n# Measure elapsed time with a monotonic timer\nfrom timeit import default_timer as timer\n\nmrs = project.mergerequests.list(state='opened', iterator=True, with_merge_status_recheck=True)\n\nfor mr in mrs:\n    start = timer()\n    #print(mr.attributes) #debug\n    # https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs\n    real_mr = project.mergerequests.get(mr.get_id())\n\n    print(\"- [ ] !{mr_id} - {mr_url}+ Status: {s}, Title: {t}\".format(\n        mr_id=real_mr.attributes['iid'],\n        mr_url=real_mr.attributes['web_url'],\n        s=real_mr.attributes['detailed_merge_status'],\n        t=real_mr.attributes['title']))\n\n    end = timer()\n    duration = end - start\n    if duration > 1.0:\n        print(\"ALERT: > 1s\")\n    print(\"> Execution time took {s}s\".format(s=duration))\n```\n\nMore tips are discussed in the following sections:\n\n- [Advanced custom configuration](#advanced-custom-configuration)\n- [CI/CD code linting for different Python versions](#cicd-code-linting-for-different-python-versions)\n\n### Advanced custom configuration\n\nWhen you are developing a script that requires advanced custom configuration, choose a format that fits best into existing infrastructure and development guidelines. 
Python provides libraries for parsing YAML, JSON, etc. The following example configuration file and script showcase a YAML configuration option. It is based on [a script that automatically updates a list of issues/epics](https://gitlab.com/gitlab-de/gitlab-api-automated-commenter) with a comment, reminding responsible team members of a recurring update for a cross-functional initiative at GitLab.\n\n[python_gitlab_custom_yaml_config.yml](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_custom_yaml_config.yml)\n```yaml\ntasks:\n  - name: \"Backend\"\n    url: \"https://gitlab.com/group1/project2/-/issues/1\"\n  - name: \"Frontend\"\n    url: \"https://gitlab.com/group2/project4/-/issues/2\"\n```\n\n[python_gitlab_custom_script_config_yaml.py](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_custom_script_config_yaml.py)\n```python\nimport os\nimport yaml\n\nCONFIG_FILE = os.environ.get('GL_CONFIG_FILE', \"python_gitlab_custom_yaml_config.yml\")\n\n# Read config\nwith open(CONFIG_FILE, mode=\"rt\", encoding=\"utf-8\") as file:\n    config = yaml.safe_load(file)\n    #print(config) #debug\n\ntasks = []\nif \"tasks\" in config:\n    tasks = config['tasks']\n\n# Process the tasks\nfor task in tasks:\n    print(\"Task name: '{n}' Issue URL to update: {id}\".format(n=task['name'], id=task['url']))\n    # print(task) #debug\n```\n\n```shell\n$ python3 python_gitlab_custom_script_config_yaml.py\nTask name: 'Backend' Issue URL to update: https://gitlab.com/group1/project2/-/issues/1\nTask name: 'Frontend' Issue URL to update: https://gitlab.com/group2/project4/-/issues/2\n```\n\n\n### CI/CD code linting for different Python versions\n\nAll code examples in this blog post have been tested with Python 3.8, 3.9, 3.10 and 3.11, using [parallel matrix builds in GitLab CI/CD](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/.gitlab-ci.yml) and pyflakes for code linting. Automating the tests helps you focus on development while ensuring that the target platforms support the language features used. Some Linux distributions do not provide Python 3.11 yet, for example, so newer language features cannot be used or may need an alternative implementation.\n\n```yaml\ninclude:\n  - template: Security/SAST.gitlab-ci.yml\n  - template: Dependency-Scanning.gitlab-ci.yml\n  - template: Secret-Detection.gitlab-ci.yml\n\nstages:\n  - lint\n  - test\n\n.python-req:\n  image: python:$VERSION\n  script:\n    - pip install -r requirements_dev.txt\n  parallel:\n    matrix:\n      - VERSION: ['3.8', '3.9', '3.10', '3.11']   # https://hub.docker.com/_/python\n\nlint-python:\n  extends: .python-req\n  stage: lint\n  script:\n    - !reference [.python-req, script]\n    - pyflakes .\n\nsast:\n  stage: test\n\n```\n\n## Optimize code and performance\n\n- [Lazy objects](#lazy-objects)\n- [Object-oriented programming](#object-oriented-programming)\n\n### Lazy objects\n\nWhen working with objects that do not immediately need all attributes loaded, you can pass the [`lazy=True`](https://python-gitlab.readthedocs.io/en/stable/api-usage.html#lazy-objects) parameter to avoid invoking an API call immediately. A follow-up method call then invokes the required API calls.\n\n\n```python\n# Lazy object, no API call\nproject = gl.projects.get(PROJECT_ID, lazy=True)\n\ntry:\n    print(\"Trying to access 'snippets_enabled' on a lazy loaded project object. 
This will throw an exception that we capture.\")\n    print(\"Project settings: snippets_enabled={b}\".format(b=project.snippets_enabled))\nexcept Exception as e:\n    print(\"Accessing lazy loaded object failed: {e}\".format(e=e))\n\nproject.snippets_enabled = True\n\nproject.save() # This creates an API call\n\nprint(\"\\nLazy object was loaded after save() call.\")\nprint(\"Project settings: snippets_enabled={b}\".format(b=project.snippets_enabled))\n\n```\n\nExecuting the [`python_gitlab_lazy_objects.py`](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_lazy_objects.py) script shows that the lazy object did not fire an API call, thus throwing an exception when accessing the project setting `snippets_enabled`. To show that the object can still be managed, the code catches the exception, updates the setting locally, and calls `project.save()` to persist the change through an API update.\n\n```shell\n$ python3 python_gitlab_lazy_objects.py\nTrying to access 'snippets_enabled' on a lazy loaded project object. This will throw an exception that we capture.\nAccessing lazy loaded object failed: 'Project' object has no attribute 'snippets_enabled'\n\nIf you tried to access object attributes returned from the server,\nnote that \u003Cclass 'gitlab.v4.objects.projects.Project'> was created as\na `lazy` object and was not initialized with any data.\n\nLazy object was loaded after save() call.\nProject settings: snippets_enabled=True\n```\n\n### Object-oriented programming\n\nFor better code quality, it makes sense to follow object-oriented programming and create classes that store attributes, provide methods, and enable better unit testing. The [storage analyzer tool](https://gitlab.com/gitlab-de/gitlab-storage-analyzer) was developed to create a summary of projects that consume lots of storage, for example with CI/CD job artifacts. By inspecting the [Git history](https://gitlab.com/gitlab-de/gitlab-storage-analyzer/-/commits/main), you can learn from the different iterations toward a first working version.\n\nThe following example is a trimmed version which shows how to initialize the class `GitLabUseCase`, add helper functions for logging and JSON pretty-printing, and print all project attributes.\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\nimport json\n\n# Print an error message with prefix, and exit immediately with an error code.\ndef error(text):\n    logger(\"ERROR\", text)\n    sys.exit(1)\n\n# Log a line with a given prefix (e.g. 
INFO)\ndef logger(prefix, text):\n    print(\"{prefix}: {text}\".format(prefix=prefix, text=text))\n\n# Return a pretty-printed JSON string with indent of 4 spaces\ndef render_json_output(data):\n    return json.dumps(data, indent=4, sort_keys=True)\n\n# Class definition\nclass GitLabUseCase(object):\n    # Initializer to set all required parameters\n    def __init__(self, verbose, gl_server, gl_token, gl_project_id):\n        self.verbose = verbose\n        self.gl_server = gl_server\n        self.gl_token = gl_token\n        self.gl_project_id = gl_project_id\n\n    # Debug logger, controlled via verbose parameter\n    def log_debug(self, text):\n        if self.verbose:\n            print(\"DEBUG: {d}\".format(d=text))\n\n    # Connect to the GitLab server and store the connection handle\n    def connect(self):\n        self.log_debug(\"Connecting to GitLab API at {s}\".format(s=self.gl_server))\n        # Supports personal/project/group access token\n        # https://docs.gitlab.com/ee/api/index.html#personalprojectgroup-access-tokens\n        self.gl = gitlab.Gitlab(self.gl_server, private_token=self.gl_token)\n\n    # Use the stored connection handle to fetch a project object by id,\n    # and print its attributes with JSON pretty-print.\n    def print_project_attributes(self):\n        project = self.gl.projects.get(self.gl_project_id)\n        print(render_json_output(project.attributes))\n\n\n## main\nif __name__ == '__main__':\n    # Fetch configuration from environment variables.\n    # The second parameter specifies the default value when not provided.\n    gl_verbose = os.environ.get('GL_VERBOSE', False)\n    gl_server = os.environ.get('GL_SERVER', 'https://gitlab.com')\n\n    gl_token = os.environ.get('GL_TOKEN')\n\n    if not gl_token:\n        error(\"Please specify the GL_TOKEN env variable\")\n\n    gl_project_id = os.environ.get('GL_PROJECT_ID', 42491852) # https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python\n\n    # Instantiate new object and run methods\n    gl_use_case = GitLabUseCase(gl_verbose, gl_server, gl_token, gl_project_id)\n    gl_use_case.connect()\n    gl_use_case.print_project_attributes()\n```\n\nRunning the [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_oop_helpers.py) with the `GL_PROJECT_ID` environment variable pretty-prints the project attributes as JSON on the terminal.\n\n![Example script that pretty-prints the project object attributes as JSON](/images/blogimages/efficient-devsecops-workflows-python-gitlab-handson/python_gitlab_oop_example_terminal_output_project_attributes.png){: .shadow}\n\n## More use cases\n\nBetter performance with API requests can be achieved by looking into parallelization and threading in Python. Users testing the storage analyzer script provided feedback to optimize the performance of the single-threaded script by using tasks and [Python threading](https://realpython.com/intro-to-python-threading/), similar to [this community project](https://gitlab.com/thelabnyc/gitlab-storage-cleanup). 
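As a starting point, here is a minimal, hypothetical sketch (assuming an authenticated `gl` object and a `group` object as in the earlier examples) that fetches full project objects concurrently with the standard `concurrent.futures` module:\n\n```python\nfrom concurrent.futures import ThreadPoolExecutor\n\n# Fetch one full project object per API call.\ndef fetch_project(project_id):\n    return gl.projects.get(project_id)\n\nproject_ids = [p.id for p in group.projects.list(include_subgroups=True, all=True)]\n\n# Keep the worker count low to respect API rate limits.\nwith ThreadPoolExecutor(max_workers=4) as executor:\n    projects = list(executor.map(fetch_project, project_ids))\n\nfor project in projects:\n    print(project.name_with_namespace)\n```\n\n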
I might follow up on this topic in a future blog post; there are many more great use cases to cover using python-gitlab.\n\nThere is so much more to learn. Here are a few examples from the GitLab community forum that could not make it into this blog post:\n\n* [Fetch review app environment URL from Merge Request](https://forum.gitlab.com/t/fetch-review-app-environment-url-from-merge-request/71335/2)\n* [Project visibility, project features, permissions](https://forum.gitlab.com/t/project-visibility-project-features-permissions-settings-api/32242)\n* [Download GitLab CI/CD job artifacts using Python](https://forum.gitlab.com/t/download-gitlab-ci-jobs-artifacts-using-python/25436)\n\n## Conclusion\n\nThe python-gitlab library helps to abstract raw REST API calls and keeps access to attributes, functions, and objects short and relatively easy. There are many use cases that can be solved efficiently. Alternative programming language libraries for the GitLab REST API are available [in the API clients section here](/partners/technology-partners/#api-clients).\n\nThe [GitLab Community Forum](https://forum.gitlab.com/) is a great place to collaborate on use cases and questions about possible solutions or code snippets. We'd love to hear from you about your use cases and challenges using the python-gitlab library.\n\nShoutout to the python-gitlab maintainers and contributors, who have been developing this fantastic API library for many years now! If this blog post and the python-gitlab library helped you get more efficient, please consider [contributing to python-gitlab](https://python-gitlab.readthedocs.io/en/stable/#contributing). When a GitLab API feature is missing, look into [contributing to GitLab](https://about.gitlab.com/community/contribute/), too. Thank you!\n\n\nCover image by [David Clode](https://unsplash.com/@davidclode) on [Unsplash](https://unsplash.com/photos/cxMJYcuCLEA)\n{: .note}",[232,726,725,480],{"slug":2037,"featured":6,"template":678},"efficient-devsecops-workflows-hands-on-python-gitlab-api-automation","content:en-us:blog:efficient-devsecops-workflows-hands-on-python-gitlab-api-automation.yml","Efficient Devsecops Workflows Hands On Python Gitlab Api Automation","en-us/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation.yml","en-us/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation",{"_path":2043,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2044,"content":2050,"config":2056,"_id":2058,"_type":16,"title":2059,"_source":17,"_file":2060,"_stem":2061,"_extension":20},"/en-us/blog/how-start-ospo-ten-minutes-using-gitlab",{"title":2045,"description":2046,"ogTitle":2045,"ogDescription":2046,"noIndex":6,"ogImage":2047,"ogUrl":2048,"ogSiteName":692,"ogType":693,"canonicalUrls":2048,"schema":2049},"Start an open source center of excellence in 10 minutes using GitLab","Launch your own open source program office using the OSPO Alliance's tools on GitLab","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682593/Blog/Hero%20Images/opensign.jpg","https://about.gitlab.com/blog/how-start-ospo-ten-minutes-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Start an open source center of excellence in 10 minutes using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Boris Baldassari\"}],\n        \"datePublished\": \"2023-01-30\",\n      
}",{"title":2045,"description":2046,"authors":2051,"heroImage":2047,"date":2053,"body":2054,"category":14,"tags":2055},[2052],"Boris Baldassari","2023-01-30","\nNow that open source has finally become a mainstream topic of conversation in the software industry, many organizations are increasingly curious about best practices for consuming, using, managing, and contributing to open source software projects. Open source software can seem alien and intimidating for organizations unfamiliar with it, and participating meaningfully and effectively in the open source ecosystem can be challenging.\n\nOrganizations especially serious about working in open source have formed [open source program offices](https://opensource.com/business/16/5/whats-open-source-program-office) (OSPOs) to spearhead their efforts. These offices are centers of excellence for an organization's ongoing work in open source. They help the organization realize the benefits of working with open source communities to accelerate innovation and build more secure tools.\n\nPerhaps your organization is considering establishing an OSPO. If it is, you likely have questions about how to get started – and especially about the best ways to help your organization become a valuable participant in the open source ecosystem.\n\nThe [OSPO Alliance](https://ospo.zone/) can help. Formed in 2021, the OSPO Alliance connects [experienced open source practitioners](https://ospo.zone/membership/) with organizations in need of seasoned guides to the open source world. Since the organization's founding, its members have composed a corpus of best open source practices called the [Good Governance Initiative Handbook](https://ospo.zone/ggi/), which explores various legal, cultural, and strategic considerations organizations face when working with open source software (and, naturally, the handbook itself is openly licensed, so anyone can contribute to it).\n\nTo celebrate the launch of the GGI Handbook Version 1.1, the OSPO Alliance went a step further: We have released the [MyGGI project](https://gitlab.ow2.org/ggi/my-ggi-board), which allows organizations to quickly create the infrastructure for their own open source program offices using GitLab.\n\nNow, let's look at what the MyGGI project can help your organization accomplish, including how to use the tool to establish an OSPO built on GGI principles — in only 10 minutes.\n\n## Working with the GGI Handbook\n\nThe GGI Handbook defines 25 activities, or best practices, organized according to various goals an organization may seek to accomplish with open source. Examples of activities include recommendations like \"Manage open source skills and resources,\" \"Manage software dependencies,\" \"Upstream first,\" or \"Engage with open source projects.\" Each of these activities, then, has a corresponding description and rationale, and the handbook provides resources, tools, and hints for successfully implementing them.\n\nActivites are intentionally generic and must be adapted to your organization's specific, unique, local context. The GGI Hanbook offers tools for doing this, too: scorecards. Scorecards allow you to assess your organization's engagement in and progress with various open source best practices.\n\nSo working with the GGI Handbook in your organization might look something like this:\n\n1. 
Evaluate the open source-related activities the handbook proposes and remove those that don't fit your specific context (maybe some activities will require a bit of adaptation to be more relevant to the domain, while some others may just be discarded).\n1. Identify the activities that would be most beneficial to reaching your organization's goals in engaging with open source.\n1. Construct an Agile-like, iterative process for working on a small set of these activities. Do this in the form of sprints by tracking your progress with scorecards, and adapt the activity to your local context, team cultures, and available resources as you go.\n1. At the end of each iteration, review the activities your teams have completed, select a new scope for improvement, and repeat the process.\n\nThe MyGGI project provides a push-button infrastructure for doing this work. Next, let's examine how to deploy it on GitLab.\n\n### Deploying the GGI Handbook on GitLab\n\nThe OSPO Alliance wanted to provide a quick and straightforward way for organizations to establish their own open source program activities using a dashboard, so they can start implementing the GGI Handbook's methods without delay. We didn't want to reinvent the wheel with some heavy custom tooling. Instead, we decided to build the project using tools already available to us. We had already used GitLab issues to model activities during the early stages of handbook development, so reusing this GitLab feature made the most sense. By simply adding some scripting to automate the initialization of activities and updating a static website on GitLab Pages, we were able to launch the project so others could easily deploy it in their own GitLab instances.\n\nInstructions for deploying the program are available in the project's [README](https://gitlab.ow2.org/ggi/my-ggi-board/-/blob/main/README.md). Let's review them here and start your own OSPO together.\n\nFirst, we need to create a new project on our GitLab instance. Select `Import project`, then `From repository by URL`. \n\nNext, we will need to provide a remote URL. Copy the existing MyGGI project by using the URL `https://gitlab.ow2.org/ggi/my-ggi-board.git`.\n\nThen we will give our project a unique name and choose a visibility level. Here's an example of how it might look:\n\n![Repository by URL](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-1.png){: .shadow}\n\nWhen you have configured your desired settings, click `Create project` to continue.\n\nOur next step is to configure access privileges. Go to `Project Settings > Access Tokens` and create a `Project Access Token` with `API` privilege and `Maintainer` role. The project's scripts will use these to create the issues and generate the static website dashboard for your OSPO.\n\nWhen the token is created, copy it to a safe place, as **you will never be able to see it again**. Note that some GitLab instances prefer to disable the Project Access Token feature in favor of Personal Access Tokens. 
This is perfectly okay; the preference won't affect the deployment of this project (see the instructions for more details).\n\nHere's an example of what you will see at this stage:\n\n![Project access tokens](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-2.png){: .shadow}\n\nWe will then provide this access token to the pipelines and scripts by creating a CI/CD environment variable. Go to `Project Settings` and then `CI/CD`. Scroll to the `Variables` section and add a new variable with name `GGI_GITLAB_TOKEN`. Input the access token you created in the last step as the value. Here's an example:\n\n![Add variable screen](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-3.png){: .shadow}\n\nWe can now execute the pipeline to begin the process of creating your OSPO infrastructure. Go to `CI/CD`, then `Pipelines`, and click on `Run pipeline`. After a couple of minutes, the pipeline should finish and the website will deploy. You will see something like this when the pipeline finishes:\n\n![Pipeline passed screen](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-4.png){: .shadow}\n\nInfrastructure for your open source program office is now ready!\n\n### Using the tools\n\nThe MyGGI project creates a set of 25 activities, along with a nice project board to help you visualize them:\n\n![Project board](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-5.png){: .shadow}\n\nUsers can click on specific activities (rendered as issues) to read the description of the activity, understand the tools and resources that might help them complete it, and begin completing relevant scorecards. Users can also define their own perspectives on the activities, as they see them from the organization's specific context. Then they can create tasks to narrow the scope of each activity so they can iterate on it and track progress. \n\nTheir work is displayed on a static website hosted on GitLab Pages and updated nightly according to the organization's progress on various activities and tasks. This web page is especially useful to present the program and its day-to-day evolution to the organization (or the world); participants, stakeholders, and executives can review it to learn more about the various initiatives, see what work is underway, and track the overall development of the organization's open source program office. The initial website looks like this:\n\n![Welcome screen of website](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-6.png){: .shadow}\n\n### Running your OSPO\n\nSelecting an open source program manager to oversee the work on the project boards is beneficial at this step. That person will:\n\n- Assign issues to team members to start working on new activities, create scorecards to track the work and associated tasks, and label them as \"In Progress\" instead of \"Not Started\".\n- Oversee the evolution of the work as it moves through various iterations, completing the scorecards with local resources and information, and closing issues as tasks are complete.\n- Ensure that issues keep making progress and, as team members complete them, assign new ones.\n\nAs changes occur in both the project and its issues, your OSPO's static website will regularly update to reflect the current status of activities, tasks, and the overall progress. 
After some time, for instance, the dashboard may look like this:\n\n![Dashboard with current status](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-9.png){: .shadow}\n\nYou're now on your way to establishing your organization's open source program office. Don't hesitate to connect with the [OSPO Alliance](https://ospo.zone/) for help and support as you continue your journey!\n\n_Boris Baldassari is an open source consultant at the Eclipse Foundation Europe, and an active contributor to the OSPO Alliance._\n\nCover image by [Clay Banks](https://unsplash.com/@claybanks?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)\n{: .note}\n\n",[703,726,894],{"slug":2057,"featured":6,"template":678},"how-start-ospo-ten-minutes-using-gitlab","content:en-us:blog:how-start-ospo-ten-minutes-using-gitlab.yml","How Start Ospo Ten Minutes Using Gitlab","en-us/blog/how-start-ospo-ten-minutes-using-gitlab.yml","en-us/blog/how-start-ospo-ten-minutes-using-gitlab",{"_path":2063,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2064,"content":2070,"config":2076,"_id":2078,"_type":16,"title":2079,"_source":17,"_file":2080,"_stem":2081,"_extension":20},"/en-us/blog/keeping-your-development-dry",{"title":2065,"description":2066,"ogTitle":2065,"ogDescription":2066,"noIndex":6,"ogImage":2067,"ogUrl":2068,"ogSiteName":692,"ogType":693,"canonicalUrls":2068,"schema":2069},"DRY development: A cheatsheet on reusability throughout GitLab","How to follow the DevOps principle of 'don't repeat yourself' to optimize CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683555/Blog/Hero%20Images/drylights.jpg","https://about.gitlab.com/blog/keeping-your-development-dry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DRY development: A cheatsheet on reusability throughout GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Joe Randazzo\"}],\n        \"datePublished\": \"2023-01-03\",\n      }",{"title":2065,"description":2066,"authors":2071,"heroImage":2067,"date":2073,"body":2074,"category":14,"tags":2075},[789,2072],"Joe Randazzo","2023-01-03","\nMore than 20 years ago, the book [The Pragmatic Programmer](https://pragprog.com/titles/tpp20/the-pragmatic-programmer-20th-anniversary-edition/) brought attention to the DRY principle, or “Don’t Repeat Yourself.” This principle is defined as “every piece of knowledge must have a single, unambiguous, authoritative representation within a system.”\n\nThe main problem to solve here is minimizing duplication. As a development project is bombarded with new requests or changing requirements, DevOps teams must balance the development of net-new features with maintaining existing code. The important part is how to reduce duplicate knowledge across projects.\n\nThis tutorial explores the mechanisms throughout GitLab that leverage the DRY principle to cut down on code duplication and standardize on knowledge. To see working examples of reusability in action, take a look at this [repository](https://gitlab.com/guided-explorations/gitlab-ci-yml-tips-tricks-and-hacks/dry-repository-a-cheatsheet).\n\n## Minimizing duplication in CI/CD\n\n### include\n[`include`](https://docs.gitlab.com/ee/ci/yaml/index.html#include) can be used to transform a single .gitlab-ci.yml file into multiple files to improve readability and minimize duplication. 
For example, testing, security, or deployment workflows can be broken out into separate templates. This also allows defining [ownership](https://docs.gitlab.com/ee/user/project/codeowners/) of the files.\n\n```yaml\ninclude:\n  - template: CI/Build.gitlab-ci.yml\n  - template: CI/Test.gitlab-ci.yml\n  - template: CI/Security.gitlab-ci.yml\n  - template: CD/Deploy.gitlab-ci.yml\n\n```\n\n### YAML anchors\n[YAML anchors](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#anchors) can be used to reduce repeat syntax and extend blocks of CI workflow, including jobs, variables, and scripts.\n\n```yaml\n.test_template: &test_suite\n  image: ruby:2.6\n\nunit_test:\n  \u003C\u003C: *test_suite\n  script:\n    - echo \"Running a test here\"\n\nend_to_end_test:\n  \u003C\u003C: *test_suite\n  script:\n    - echo \"Running a test here\"\n\nsmoke_test:\n  \u003C\u003C: *test_suite\n  script:\n    - echo \"Running a test here\"\n```\n\n### extends\n[`extends`](https://docs.gitlab.com/ee/ci/yaml/index.html#extends) is similar to anchors with additional flexibility and readability. The major difference is that it can be used with `include`.\n\n```yaml\n\n.prepare_deploy:\n  stage: deploy\n  script:\n    - echo \"I am preparing the deploy\"\n  only:\n    - main\n\ndeploy_to_dev:\n  extends: .prepare_deploy\n  script:\n    - echo \"Deploy to dev environment\"\n  environment: dev\n\ndeploy_to_production:\n  extends: .prepare_deploy\n  script:\n    - echo \"Deploy to production environment\"\n  when: manual\n  environment: production\n```\n\n### !reference\n[`!reference`](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#reference-tags) enables the selection of keyword configuration from other job sections and reuses it in the current section.\n\n```yaml\n.vars:\n  variables:\n    DEV_URL: \"http://dev-url.com\"\n    STAGING_URL: \"http://staging-url.com\"\n\n.setup_env:\n  script:\n    - echo \"Creating Environment\"\n\n.teardown_env:\n  after_script:\n    - echo \"Deleting Environment\"\n\nintegration_test:\n  variables:\n    DEV_URL: !reference [.vars, variables, DEV_URL]\n  script:\n    - !reference [.setup_env, script]\n    - echo \"Run Test\"\n  after_script:\n    - !reference [.teardown_env, after_script]\n\nperformance_test:\n  variables: !reference [.vars, variables]\n  script:\n    - !reference [.setup_env, script]\n    - echo \"Run Test\"\n  after_script:\n    - !reference [.teardown_env, after_script]\n```\n\n### Downstream pipelines\n[Downstream pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) enable the breakout of microservices and their pipelines. A .gitlab-ci.yml file can be used for each service, and when a file or directory is changed, only that pipeline needs to be triggered, improving the awareness and readability of what’s deploying.\n\n```yaml\nui:\n  trigger:\n    include: ui/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [ui/*]\n\nbackend:\n  trigger:\n    include: backend/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [backend/*]\n```\n\n![Dynamic child pipeline](https://about.gitlab.com/images/blogimages/2022-02-01-parent-child-vs-multi-project-pipelines/parent-child.png){: .shadow}\n\n### CI/CD variables\n[CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) can be scoped to a specific level, including the project, group, instance level, or .gitlab-ci.yml level. 
The values can be stored and reused across a group for project inheritance or overwritten at the project level.\n\n```yaml\nvariables:\n  PROJECT_LEVEL_VARIABLES: \"I am first in line in precedence\"\n  GROUP_LEVEL_VARIABLES: \"I am second in line\"\n  INSTANCE_LEVEL_VARIABLES: \"I am in third place\"\n  GITLAB_CI_YML_LEVEL_VARIABLES: \"I am last in line of precedence\"\n\n```\n\n## Creating consistent code reviews across multiple teams\n\n### Description templates\n[Description templates](https://docs.gitlab.com/ee/user/project/description_templates.html) enable teams to define a consistent workflow for issues or merge requests. For example, the MR template can define a checklist for rolling out a feature to ensure it’s documented, quality tested, and reviewed by appropriate team members. Here are [MR templates](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/merge_request_templates) that GitLab team members use daily.\n\n```md\n\u003C!-- These templates can be set at the instance or group level to share amongst the organization: https://docs.gitlab.com/ee/user/project/description_templates.html#set-instance-level-description-templates -->\n\n## What does this MR do?\n\n\u003C!-- Briefly describe what this MR is about. -->\n\n## Related issues\n\n\u003C!-- Link related issues below. -->\n\n## Create a checklist for the author or reviewer\n- [ ] Optional. Consider taking this writing course before publishing a change.\n- [ ] Follow the documentation process stated here.\n- [ ] Tag this user group if this applies.\n\n\n\u003C!-- Quick Actions - See https://docs.gitlab.com/ee/user/project/quick_actions.html#issues-merge-requests-and-epics for a list of all the quick actions available. -->\n\n\u003C!-- Add a label to assign a specific workflow using scoped labels -->\n/label ~documentation ~\"type::maintenance\" ~\"docs::improvement\" ~\"maintenance::refactor\"\n\n\u003C!-- Apply draft format automatically -->\n/draft\n\n\u003C!-- Assign myself or a usergroup -->\n/assign me\n```\n\n### Project templates\n[Project templates](https://docs.gitlab.com/ee/user/group/custom_project_templates.html) can be used to define an initial project structure for when new services are being developed. This gives a consistent starting point for projects that come equipped with the latest file configurations and defaults.\n\n### File templates\n[File templates](https://docs.gitlab.com/ee/administration/settings/instance_template_repository.html) are similar to project templates but are default files to choose from when adding a new file to your repository. The team can then quickly choose from files that have best practices and organization defaults baked in.\n\n## Defining a Pipeline Center of Excellence project for CI/CD workflows\n\nAs you 'productionize' your CI/CD workflows, it’s recommended to create a “Pipeline Center of Excellence” project that contains templates, containers, or other abstracted constructs that can be adopted throughout the organization. This project contains file or CI/CD templates that have the best practices or well-formed workflows defined for development teams to quickly adopt (via `include`) without reinventing the wheel. To explore this in practice, visit [Pipeline COE](https://gitlab-org.gitlab.io/professional-services-automation/pipelinecoe/pipeline-templates/#/) documentation written by the GitLab Professional Services team.\n\nHave a reusable component to suggest or that we missed? 
Add a comment to this blog post or suggest a change to this file!\n\n## Related posts\n- [How to keep up with CI/CD best practices](https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices/)\n- [How to become more productive with GitLab CI](https://about.gitlab.com/blog/how-to-become-more-productive-with-gitlab-ci/)\n- [A visual guide to GitLab CI/CD caching](https://about.gitlab.com/blog/a-visual-guide-to-gitlab-ci-caching/)\n\nCover image by [Federico Beccari](https://unsplash.com/@federize?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com).\n",[832,937,894,725],{"slug":2077,"featured":6,"template":678},"keeping-your-development-dry","content:en-us:blog:keeping-your-development-dry.yml","Keeping Your Development Dry","en-us/blog/keeping-your-development-dry.yml","en-us/blog/keeping-your-development-dry",{"_path":2083,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2084,"content":2090,"config":2096,"_id":2098,"_type":16,"title":2099,"_source":17,"_file":2100,"_stem":2101,"_extension":20},"/en-us/blog/can-chatgpt-resolve-gitlab-issues",{"title":2085,"description":2086,"ogTitle":2085,"ogDescription":2086,"noIndex":6,"ogImage":2087,"ogUrl":2088,"ogSiteName":692,"ogType":693,"canonicalUrls":2088,"schema":2089},"Testing ChatGPT: Can it solve a GitLab issue?","We put ChatGPT to the test to see if it could contribute to GitLab. Here's what we learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670171/Blog/Hero%20Images/akshay-nanavati-Zq6HerrBPEs-unsplash.jpg","https://about.gitlab.com/blog/can-chatgpt-resolve-gitlab-issues","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Testing ChatGPT: Can it solve a GitLab issue?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Coghlan\"},{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2022-12-15\",\n      }",{"title":2085,"description":2086,"authors":2091,"heroImage":2087,"date":2093,"body":2094,"category":14,"tags":2095},[1464,2092],"Fatima Sarah Khalid","2022-12-15","\nChatGPT has taken the tech world by storm since its [launch on November 30](https://openai.com/blog/chatgpt/). Media coverage, front page posts on Hacker News, Twitter threads, and videos - everywhere you look, there is another story.\n\nThe [GitLab Slack](/handbook/communication/#slack) was no different. In threads across Slack channels, including those for developer evangelism, UX, the CEO, random news, and every space in between, our team was chatting about this exciting new tool.\n\nAs we got more familiar with the tool, we started to learn about numerous things it can do. Here are a few that we found:\n\n- It can write poetry about GitLab features. \n- It can write blog posts.\n- It can write unit tests.\n- It gives advice on how to use certain features of GitLab.\n- It conducts competitive analysis.\n\nThere’s quite a bit more out there, including [inventing a new language](https://maximumeffort.substack.com/p/i-taught-chatgpt-to-invent-a-language) and [building a virtual machine](https://www.engraved.blog/building-a-virtual-machine-inside/). We can’t recall any technology that has generated more excitement in such a short time.\n\nWe acknowledge there are ethical and licensing concerns around using AI-generated code. 
For the purpose of this blog post, we will focus strictly on the capabilities of ChatGPT.\n\n## Testing ChatGPT\n\nAs members of GitLab’s [Developer Relations team](/handbook/marketing/developer-relations/), where we’re focused on growing our community of contributors and evangelists, our first reaction was to think of how this tool can help our contributors. The responses to questions like “How can I get started contributing to GitLab?” were cool but didn’t move the needle. So then we asked ourselves: Can we use ChatGPT to make a contribution to GitLab?\n\nHaving already been testing the tool, we knew we’d need to look for a very specific type of issue. We started to fine-tune our search. Here are the steps we took to find a potential issue:\n\n- Visited [https://gitlab.com/gitlab-org](https://gitlab.com/gitlab-org) and pulled up all the open issues by clicking on `Issues` in the left side nav.\n- Searched for all issues with the [“good for new contributors” label](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=updated_desc&state=opened&label_name%5B%5D=good%20for%20new%20contributors&first_page_size=20). This returned 482 issues at the time of writing.\n- Set [`“Assignee = None”`](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=updated_desc&state=opened&label_name%5B%5D=good%20for%20new%20contributors&assignee_id=None&first_page_size=20) to filter out issues that have already been assigned to someone. This trimmed things down even more.\n\nAfter poking through a few issues, this one caught our attention: an issue that is a request to [de-duplicate identical dropdown functionality from two Vue components into a shared component](https://gitlab.com/gitlab-org/gitlab/-/issues/221293).\n\n### Getting started\n\nWe opened up a ChatGPT thread via [https://chat.openai.com/chat](https://chat.openai.com/chat). If you don’t already have an account, you’ll need to register with OpenAI.\n\nThen we started a thread with Assistant (that’s what is returned when you ask ChatGPT what its name is). We asked Assistant to create a shared Vue component called `SharedDropdownFilter` from the duplicate code in `GroupsDropdownFilter` and `ProjectsDropdownFilter` and passed the scripts of both components.\n\nChatGPT replied with:\n\n>You can create a new `SharedDropdownFilter` component that contains the duplicate code and use it in both `GroupsDropdownFilter` and `ProjectsDropdownFilter`:\n\nand provided a template to describe the use. The template provided wasn’t quite what we were looking for.\n\n```vue\n\u003Ctemplate>\n  \u003CGlDropdown\n    :aria-label=\"label\"\n    :class=\"{ disabled: loading }\"\n    :disabled=\"loading\"\n    :selected=\"selectedProjectsLabel\"\n    :variant=\"multiSelect ? 'inline' : 'single-line'\"\n  >\n    \u003Ctemplate v-slot:menu>\n      \u003Cdiv class=\"py-4\">\n        \u003CGlSearchBoxByType\n          autofocus\n          class=\"search-box\"\n          :placeholder=\"searchPlaceholder\"\n          :type=\"searchType\"\n          v-model=\"searchTerm\"\n        />\n\n        \u003CGlDropdownHeader\n          v-if=\"selectedProjects.length && multiSelect\"\n          :class=\"{ 'd-none': !searchTerm }\"\n          class=\"js-dropdown-header\"\n        >\n          \u003Cspan>\n            {{ selectedProjectsLabel }}\n          \u003C/span>\n\n          \u003Cbutton\n```\n\nSo we tried again, this time being more specific and asking ChatGPT to “Create a shared Vue component that combines the two components `ProjectsDropdownFilter` and `GroupsDropdownFilter`”. 
We passed in the full component files, including the templates.\n\nChatGPT responded with instructions, a sample Vue component, and an explanation of the functionality:\n\n> Here is a sample implementation of a shared Vue component that combines the two components `ProjectsDropdownFilter` and `GroupsDropdownFilter`:\n\n```vue\n\u003Ctemplate>\n  \u003Cdiv>\n    \u003CProjectsDropdownFilter\n      :groupId=\"groupId\"\n      :multiSelect=\"multiSelect\"\n      :label=\"label\"\n      :queryParams=\"queryParams\"\n      :defaultProjects=\"defaultProjects\"\n      @selected=\"onProjectsSelected\"\n    />\n    \u003CGroupsDropdownFilter\n      :multiSelect=\"multiSelect\"\n      :label=\"label\"\n      :queryParams=\"queryParams\"\n      :defaultGroups=\"defaultGroups\"\n      @selected=\"onGroupsSelected\"\n    />\n  \u003C/div>\n\u003C/template>\n\n\u003Cscript>\nimport { ProjectsDropdownFilter } from './ProjectsDropdownFilter.vue';\nimport { GroupsDropdownFilter } from './GroupsDropdownFilter.vue';\n\nexport default {\n  name: 'SharedDropdownFilter',\n  components: {\n    ProjectsDropdownFilter,\n    GroupsDropdownFilter,\n  },\n  props: {\n    groupId: {\n      type: Number,\n      required: true,\n    },\n    multiSelect: {\n      type: Boolean,\n      required: false,\n      default: false,\n    },\n    label: {\n      type: String,\n      required: false,\n      default: 'Dropdown filter',\n    },\n    queryParams: {\n      type: Object,\n      required: false,\n      default: () => ({}),\n    },\n    defaultProjects: {\n      type: Array,\n      required: false,\n      default: () => [],\n    },\n    defaultGroups: {\n      type: Array,\n      required: false,\n      default: () => [],\n    },\n  },\n  data() {\n    return {\n      selectedProjects: this.defaultProjects || [],\n      selectedGroups: this.defaultGroups || [],\n    };\n  },\n  methods: {\n    onProjectsSelected(selectedProjects) {\n      this.selectedProjects = selectedProjects;\n      this.$emit('selected', this.selectedProjects, this.selectedGroups);\n    },\n    onGroupsSelected(selectedGroups) {\n      this.selectedGroups = selectedGroups;\n      this.$emit('selected', this.selectedProjects, this.selectedGroups);\n    },\n  },\n};\n\u003C/script>\n```\n\n> In this implementation, the `SharedDropdownFilter` component is rendered as a container for the `ProjectsDropdownFilter` and `GroupsDropdownFilter` components. It passes down the `groupId`, `multiSelect`, `label`, `queryParams`, and `defaultProjects` props to the `ProjectsDropdownFilter` component, and the `multiSelect`, `label`, `queryParams`, and `defaultGroups` props to the `GroupsDropdownFilter` component. It also listens to the `selected` event emitted by the two components and updates its own `selectedProjects` and `selectedGroups` data accordingly. The `SharedDropdownFilter` component emits its own `selected` event with the updated `selectedProjects` and `selectedGroups` data when it receives the `selected` event from the `ProjectsDropdownFilter` and `GroupsDropdownFilter` components. 
This allows the parent component of the `SharedDropdownFilter` to be notified whenever the selected projects and groups are updated, so that it can take appropriate action, such as updating the data it displays.\n\nWith ChatGPT’s shared Vue component, we could start an MR for the issue, but would need to manually review the code and run tests in order to make sure the deduplication was done correctly.\n\n### Adjusting the filters\n\nIn hopes of finding an issue that ChatGPT could fully solve, this time we filtered using two labels (`good for new contributors` and `quick win`), Assignee = None, and added the keyword “refactor” to look for issues that required less net new code. At the time of writing, this [query](https://gitlab.com/gitlab-org/gitlab/-/issues/?search=refactor&sort=updated_asc&state=opened&label_name%5B%5D=quick%20win&label_name%5B%5D=good%20for%20new%20contributors&assignee_id=None&first_page_size=20) returned one issue: [a refactor for widget state tests](https://gitlab.com/gitlab-org/gitlab/-/issues/228611).\n\nAfter reviewing the issue, we opened the `/spec/frontend/vue_merge_request_widget/stores/get_state_key_spec.js` file mentioned in the issue description: We navigated to the file on GitLab.com SaaS, opened the dropdown on the blue button at the top of the file frame, and selected “Open in Web IDE” to open the file in GitLab’s built-in Web IDE.\n\nWe copied the code from the file and asked ChatGPT to refactor these tests into an `it.each` table. It was interesting to watch the output as ChatGPT provided an example of how we could refactor the tests using `it.each`:\n\n```javascript\nimport getStateKey from '~/vue_merge_request_widget/stores/get_state_key';\n\ndescribe('getStateKey', () => {\n  const context = {\n    mergeStatus: 'checked',\n    autoMergeEnabled: false,\n    canMerge: true,\n    onlyAllowMergeIfPipelineSucceeds: false,\n    isPipelineFailed: false,\n    hasMergeableDiscussionsState: false,\n    isPipelineBlocked: false,\n    canBeMerged: false,\n    projectArchived: false,\n    branchMissing: false,\n    commitsCount: 2,\n    hasConflicts: false,\n    draft: false,\n    detailedMergeStatus: null,\n  };\n\n  it.each([\n    [{}, null],\n    [{detailedMergeStatus: 'MERGEABLE'}, 'readyToMerge'],\n    [{detailedMergeStatus: 'DISCUSSIONS_NOT_RESOLVED'}, 'unresolvedDiscussions'],\n    [{detailedMergeStatus: 'DRAFT_STATUS'}, 'draft'],\n    [{detailedMergeStatus: 'CI_MUST_PASS'}, 'pipelineFailed'],\n    [{shouldBeRebased: true}, 'rebase'],\n    [{hasConflicts: true}, 'conflicts'],\n    [{detailedMergeStatus: 'CHECKING'}, 'checking'],\n    [{commitsCount: 0}, 'nothingToMerge'],\n    [{commitsCount: 1, branchMissing: true}, 'missingBranch'],\n    [{projectArchived: true}, 'archived'],\n  ])('should return \"%s\" when called with %p', (expected, input) => {\n    const bound = getStateKey.bind(Object.assign({}, context, input));\n    expect(bound()).toEqual(expected);\n  });\n\n  it('returns rebased state key', () => {\n    const context = {\n      mergeStatus: 'checked',\n      autoMergeEnabled: false,\n      canMerge: true,\n      onlyAllowMergeIfPipelineSucceeds: true,\n      isPipelineFailed: true,\n      hasMergeableDiscussionsState: false,\n      isPipelineBlocked: false,\n      canBeMerged: false,\n      shouldBeRebased: true,\n      projectArchived: false,\n      branchMissing: false,\n      commitsCount: 2,\n      hasConflicts: false,\n      draft: false,\n    };\n    const bound = getStateKey.bind(context);\n\n
    expect(bound()).toEqual('rebase');\n  });\n});\n```\n\nAlong with an explanation of `it.each`:\n\n> Note that `it.each` is a utility provided by Jest that allows you to write the same test with different inputs and expected results. This can make your tests more concise and easier to read.\n\nOur next steps were to:\n- Paste the output from ChatGPT into the Web IDE.\n- Click the blue “Commit” button to commit the code.\n- Name the branch.\n- Open a merge request.\n\nAll of this can be done inside [GitLab’s Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) without leaving the browser.\n\n### Examining the results\n\nOnce we opened the merge request, a CI/CD pipeline was created to test the change. Unfortunately, the pipeline jobs for the Jest tests failed because the tests were returning `null` for each case instead of the expected results. We went back to ChatGPT to see if it could help us understand why the test was returning null for all cases. ChatGPT suggested a few different parts of the implementation to check to make sure things were defined correctly:\n\n> There are a few reasons why the tests in the `it.each` function might be returning null for every case. Here are a few possibilities:\n>\n> If the `getStateKey` function is not implemented correctly, it might always return null, regardless of the input.\n> If the `getStateKey` function relies on certain variables that are not defined in the test cases, it might return null because those variables are undefined.\n> If the context object passed to the `getStateKey` function is not being merged correctly with the input object in the test cases, the function might not have access to all of the necessary information, and it might return null.\n\nWhile ChatGPT couldn’t solve the issue with the returned value directly, it provided a lot of possibilities to look into and offered suggestions of what variables or functions to debug. The suggestions prompted us to add code changes that logged the variable values onto the [JavaScript debug console](https://www.w3schools.com/js/js_debugging.asp), find the implementation of the `getStateKey` function, and figure out why the return value was null. Whenever we came across something unfamiliar in the code, like the syntax in the `it.each`, we asked ChatGPT for clarification or a helpful example. Many times throughout this experiment, working with ChatGPT felt like “rubber duck debugging,” but with an AI with which you have to be very specific about your ask.\n\n## What we learned from ChatGPT\n\nIn the end, we weren’t able to figure out why our tests were returning null, so we asked the front-end team if someone could review the code. Senior Frontend Engineer [Angelo Gulina](https://gitlab.com/agulina) reviewed the MR. He found that the solution was actually quite trivial: The order of parameters was inverted, resulting in a comparison that led to null! In his assessment, ChatGPT wasn’t able to provide a working solution, but it could provide solutions and ideas to an engineer with some experience with the codebase. It delivered a clean, organized solution and answered the task of combining the tests into an `it.each` table. It could not, however, catch the actual error (the inversion of parameters) or correctly guess why the tests were returning null.\n\nLet's circle back to the question that started this experiment: Can we use ChatGPT to contribute to GitLab? At this time, we’d say, \"yes,\" but you will need some understanding of the code to complete your solution. 
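\n\nTo illustrate how small the missing piece was in this case: Each row in the generated table lists the input object first and the expected state key second, while the callback destructured them in the opposite order. A minimal sketch of the corrected callback signature (illustrative, not the exact MR diff):\n\n```javascript\nit.each([\n  [{ detailedMergeStatus: 'MERGEABLE' }, 'readyToMerge'],\n  [{ detailedMergeStatus: 'DRAFT_STATUS' }, 'draft'],\n])('returns the expected state key for %p', (input, expected) => {\n  // The table rows are [input, expected], so the callback parameters\n  // must match that order; the generated code had them swapped.\n  const bound = getStateKey.bind(Object.assign({}, context, input));\n  expect(bound()).toEqual(expected);\n});\n```\n\n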
Since ChatGPT is a language model trained by OpenAI, it can only answer questions and provide information covered by its training data, which means answers requiring contextual specificity may fall short of what is needed to resolve an issue. However, it’s a tool that can help you if you’re stuck, need more clarification on a code snippet, or are trying to refactor some code. It was fascinating for us to experiment with ChatGPT and we were excited to see what it was capable of. The code provided, however, lacked some of the valuable insight and industry experience that a community of contributors can provide.\n\nAt GitLab, our [community and our open source stewardship](https://about.gitlab.com/company/strategy/#dual-flywheels) are part of our company strategy. Thousands of open source contributors worldwide have helped make GitLab what it is today. We see potential for ChatGPT and similar AI tools, not as a replacement for our community, but as a way to make our community more efficient and enable more people to contribute to GitLab.\n\n\n\n\n",[726,268,1445,894],{"slug":2097,"featured":6,"template":678},"can-chatgpt-resolve-gitlab-issues","content:en-us:blog:can-chatgpt-resolve-gitlab-issues.yml","Can Chatgpt Resolve Gitlab Issues","en-us/blog/can-chatgpt-resolve-gitlab-issues.yml","en-us/blog/can-chatgpt-resolve-gitlab-issues",{"_path":2103,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2104,"content":2109,"config":2114,"_id":2116,"_type":16,"title":2117,"_source":17,"_file":2118,"_stem":2119,"_extension":20},"/en-us/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd",{"title":2105,"description":2106,"ogTitle":2105,"ogDescription":2106,"noIndex":6,"ogImage":1498,"ogUrl":2107,"ogSiteName":692,"ogType":693,"canonicalUrls":2107,"schema":2108},"How to continuously test web apps and APIs with Hurl and GitLab CI/CD","Hurl as a CLI tool can be integrated into the DevSecOps platform to continuously verify, test, and monitor targets. It also offers integrated unit test reports in GitLab CI/CD.","https://about.gitlab.com/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to continuously test web apps and APIs with Hurl and GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2022-12-14\",\n      }",{"title":2105,"description":2106,"authors":2110,"heroImage":1498,"date":2111,"body":2112,"category":14,"tags":2113},[1504],"2022-12-14","\nTesting websites, web applications, or generally anything reachable via the HTTP protocol can be a challenging exercise. Thanks to tools like `curl` and `jq`, [DevOps workflows have become more productive](/blog/devops-workflows-json-format-jq-ci-cd-lint/) and even simple monitoring tasks can be automated with CI/CD pipeline schedules. Sometimes, use cases require specialized tooling with custom HTTP headers, parsing expected responses, and building end-to-end test pipelines. Stressful incidents also need good and fast tools that help analyze the root cause and quickly mitigate and fix problems.\n\n[Hurl](https://hurl.dev) is an open-source project developed and maintained by Orange, and uses libcurl from curl to provide HTTP test capabilities. It aims to tackle complex HTTP test challenges by providing a simple plain text configuration to describe HTTP requests. 
It can chain requests, capture values, and evaluate queries on headers and body responses. So far, so good: Hurl does not only support fetching data; it can also be used to test HTTP sessions and XML (SOAP) and JSON (REST) APIs.\n\n## Getting started\n\nHurl comes in various package formats to [install](https://hurl.dev/docs/installation.html). On macOS, a Homebrew package is available.\n\n```sh\n$ brew install hurl\n```\n\n## First steps with Hurl\n\nHurl proposes starting with the configuration file format, which is a great way to learn the syntax step by step. The following example creates a new `gitlab-contribute.hurl` configuration file that will do two things: execute a GET HTTP request on `https://about.gitlab.com/community/contribute/` and check whether its HTTP response contains the HTTP protocol `2` and status code `200` (OK).\n\n```sh\n$ vim gitlab-contribute.hurl\n\nGET https://about.gitlab.com/community/contribute/\n\nHTTP/2 200\n$ hurl --test gitlab-contribute.hurl\ngitlab-contribute.hurl: Running [1/1]\ngitlab-contribute.hurl: Success (1 request(s) in 413 ms)\n--------------------------------------------------------------------------------\nExecuted files:  1\nSucceeded files: 1 (100.0%)\nFailed files:    0 (0.0%)\nDuration:        415 ms\n```\n\nInstead of creating configuration files, you can also use the `echo \"...\" | hurl` command pattern. The following command tests against about.gitlab.com and checks whether the HTTP response protocol is 1.1 and the status is OK (200). The two newline characters `\\n` are required for separation.\n\n```sh\n$ echo \"GET https://about.gitlab.com\\n\\nHTTP/1.1 200\" | hurl --test\n```\n\n![hurl CLI run against about.gitlab.com, failed request](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_assert_failure.png)\n\nThe command fails, and the output says that the response protocol version is actually `2`. Let's adjust the test run to expect `HTTP/2`:\n\n```sh\n$ echo \"GET https://about.gitlab.com\\n\\nHTTP/2 200\" | hurl --test\n```\n\n## Asserting HTTP responses\n\nHurl allows defining [assertions](https://hurl.dev/docs/asserting-response.html) to control when the tests fail. These can be defined for different parts of the HTTP response:\n\n- Expected HTTP protocol version and status\n- Headers\n- Body\n\nThe configuration language allows users to define queries with predicates to compare, chain, and execute different assertions.\n\nThis is the easiest way to verify that the HTTP response contains an expected string or sentence on the website, for example. If the string does not exist, this can indicate that it was changed unexpectedly, or that the website is down. 
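\n\nHeader asserts follow the same pattern as body asserts. A minimal sketch, assuming the contribute page serves an HTML content type:\n\n```\nGET https://about.gitlab.com/community/contribute/\n\nHTTP/2 200\n\n[Asserts]\nheader \"Content-Type\" contains \"text/html\"\n```\n\n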
Let's revisit the example with testing GET https://about.gitlab.com/community/contribute/ and add an expected string `Everyone can contribute` as a new assertion; `body contains \u003Cstring>` is the expected configuration syntax for [body asserts](https://hurl.dev/docs/asserting-response.html#body-assert).\n\n```sh\n$ vim gitlab-contribute.hurl\n\nGET https://about.gitlab.com/community/contribute/\n\nHTTP/2 200\n\n[Asserts]\nbody contains \"Everyone should contribute\"\n\n$ hurl --test gitlab-contribute.hurl\n```\n\n**Exercise:** Fix the test by updating the asserts line to `Everyone can contribute` and run Hurl again.\n\n### Asserting responses: JSON and XML\n\n[JSONPath](https://hurl.dev/docs/asserting-response.html#jsonpath-assert) automatically parses the JSON response (a built-in `curl` plus `jq` parser, so to speak) and allows users to compare values to verify the asserts (more below). The XML format can be found in an [RSS feed on about.gitlab.com](https://about.gitlab.com/atom.xml) and parsed using [XPath](https://hurl.dev/docs/asserting-response.html#xpath-assert). The following excerpt from `atom.xml` will be verified with Hurl:\n\n```xml\n\u003Cfeed xmlns=\"http://www.w3.org/2005/Atom\">\n\u003Ctitle>GitLab\u003C/title>\n\u003Cid>https://about.gitlab.com/blog\u003C/id>\n\u003Clink href=\"https://about.gitlab.com/blog/\"/>\n\u003Cupdated>2022-11-21T00:00:00+00:00\u003C/updated>\n\u003Cauthor>\n\u003Cname>The GitLab Team\u003C/name>\n\u003C/author>\n\u003Centry>\n...\n\u003C/entry>\n\u003Centry>\n...\n\u003C/entry>\n\u003Centry>\n…\n```\n\nIt is important to note that XML namespaces need to be specified for parsing. Hurl allows users to replace the first default namespace with the `_` character to avoid adding `http://www.w3.org/2005/Atom` everywhere; the XPath is now shorter: `string(//_:feed/_:entry)` gets a list of all entries. This value is captured in the `entries` variable, which can be compared to match a specific string, `GitLab` in this example. Additionally, the feed id and author name are checked.\n\n```\n$ vim gitlab-rss.hurl\n\nGET https://about.gitlab.com/atom.xml\n\nHTTP/2 200\n\n[Captures]\nentries: xpath \"string(//_:feed/_:entry)\"\n\n[Asserts]\nvariable \"entries\" matches \"GitLab\"\n\nxpath \"string(//_:feed/_:id)\" == \"https://about.gitlab.com/blog\"\nxpath \"string(//_:feed/_:author/_:name)\" == \"The GitLab Team\"\n\n$ hurl --test gitlab-rss.hurl\n```\n\nHurl allows users to capture the value from responses into [variables](https://hurl.dev/docs/templates.html#variables) that can be used later. This method can also be helpful to model end-to-end testing workflows: First, check the website health status and retrieve a CSRF token, and then try to log into the website by sending the token again.\n\nTesting REST APIs that are expected to always return a specified field, or monitoring a website's health state, [becomes a breeze using Hurl](https://hurl.dev/docs/tutorial/chaining-requests.html#test-rest-api).\n\n## Use Hurl in GitLab CI/CD jobs\n\nThe easiest way to integrate Hurl into GitLab CI/CD is to use the official container image. The Hurl project provides a [container image on Docker Hub](https://hub.docker.com/r/orangeopensource/hurl), which did not work in CI/CD at first glance. After talking with the maintainers, the [entrypoint override](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#override-the-entrypoint-of-an-image) was identified as a solution for using the image in GitLab CI/CD. 
Note that the Alpine-based image uses a libcurl library that does not support HTTP/2 yet - the test results differ from a Debian-based image (follow [this issue report](https://github.com/Orange-OpenSource/hurl/issues/1082) for the problem analysis).\n\nThe following example is kept short: It runs the container image, overrides the entrypoint, and passes the test into Hurl using the `echo` CLI command.\n\n```yaml\nhurl-standalone:\n  image:\n    name: ghcr.io/orange-opensource/hurl:latest\n    entrypoint: [\"\"]\n  script:\n    - echo -e \"GET https://about.gitlab.com/community/contribute/\\n\\nHTTP/1.1 200\" | hurl --test --color\n```\n\nThe Hurl test report is printed into the CI/CD job trace log, and the job returns successfully.\n\n```sh\n$ echo -e \"GET https://about.gitlab.com/community/contribute/\\n\\nHTTP/1.1 200\" | hurl --test --color\n-: Running [1/1]\n-: Success (1 request(s) in 280 ms)\n--------------------------------------------------------------------------------\nExecuted files:  1\nSucceeded files: 1 (100.0%)\nFailed files:    0 (0.0%)\nDuration:        283 ms\nCleaning up project directory and file based variables\n00:00\nJob succeeded\n```\n\nThe next iteration is to create a CI/CD job template that provides generic attributes, and allows users to dynamically run the job with an environment variable called `HURL_URL`.\n\n```yaml\n# Hurl job template\n.hurl-tmpl:\n  # Use the upstream container image and override the ENTRYPOINT to run CI/CD script\n  # https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#override-the-entrypoint-of-an-image\n  image:\n    name: ghcr.io/orange-opensource/hurl:1.8.0\n    entrypoint: [\"\"]\n  variables:\n    HURL_URL: \"about.gitlab.com/community/contribute/\"\n  script:\n    - echo -e \"GET https://${HURL_URL}\\n\\nHTTP/1.1 200\" | hurl --test --color\n\nhurl-about-gitlab-com:\n  extends: .hurl-tmpl\n  variables:\n    HURL_URL: \"about.gitlab.com/jobs/\"\n```\n\nRunning GET commands with expected HTTP results is not the only use case, and the Hurl maintainers thought about this already. The next section explains how to create a custom container image; you can skip to the [DevSecOps workflows](#devsecops-workflows-with-hurl) section to learn more about efficient Hurl configuration use cases.\n\n### Custom container image with Hurl\n\nMaintaining and building a custom container image adds more work, but also helps avoid running unknown container images in CI/CD pipelines. The latter is often a requirement for compliance and security. _Since the Hurl Debian package supports detecting HTTP/2 as a protocol, this blog post will focus on building a custom image and running all tests using this image. If you plan on using the upstream container image, make sure to review the test configuration for the HTTP protocol version detection._\n\nThe Hurl documentation provides multiple ways to install Hurl. For this example, Debian 11 Bullseye (slim) is used. Hurl comes with a package dependency on `libxml2`, which can either be installed manually before running the `dpkg` command, or resolved automatically by using `apt install` on the local package.\n\nThe following CI/CD example uses a job template which defines the Hurl version as an environment variable to avoid repetitive use, and downloads and installs the Hurl Debian package. 
The `hurl-gitlab-com` job extends the CI/CD job template and runs a one-line test against `https://gitlab.com`, expecting `HTTP/2` as the HTTP protocol version and `200` as the status.\n\n```yaml\n# CI/CD job template\n.hurl-tmpl:\n  variables:\n    HURL_VERSION: 1.8.0\n  before_script:\n    - DEBIAN_FRONTEND=noninteractive apt update && apt -y install jq curl ca-certificates\n    - curl -LO \"https://github.com/Orange-OpenSource/hurl/releases/download/${HURL_VERSION}/hurl_${HURL_VERSION}_amd64.deb\"\n    - DEBIAN_FRONTEND=noninteractive apt -y install \"./hurl_${HURL_VERSION}_amd64.deb\"\n\nhurl-gitlab-com:\n  extends: .hurl-tmpl\n  script:\n    - echo -e \"GET https://gitlab.com\\n\\nHTTP/2 200\" | hurl --test --color\n```\n\nThe next section describes how to optimize the CI/CD pipelines for more efficient schedules and runs to monitor websites without wasting resources and CI/CD minutes. You can also skip it and [scroll down to more advanced Hurl examples in GitLab CI/CD](#devsecops-workflows-with-hurl).\n\n### CI/CD efficiency: Hurl container image\n\nThe installation steps for Hurl, and its dependencies, can waste resources and increase the pipeline job runtime every time. To make the CI/CD pipelines more efficient, we want to use a container image that already provides Hurl pre-installed. The following steps are required for creating a container image:\n\n- Use Debian 11 Slim as the base image (`FROM`).\n- Install dependencies to download Hurl (`curl`, `ca-certificates`). `jq` is installed for convenience to access it from CI/CD commands when needed later.\n- Download the Hurl Debian package, and use `apt install` to install its dependencies automatically.\n- Clear the apt lists cache to enforce apt update again, and avoid security issues.\n- Hurl is installed into the `PATH`; specify it as the default command (`CMD`). This allows running the container without having to specify a command.\n\nThe steps to install the packages are separated for better readability; the `docker-build` job can be optimized by chaining the `RUN` commands into one long command.\n\n`Dockerfile`\n```\nFROM debian:11-slim\n\nENV DEBIAN_FRONTEND noninteractive\n\nARG HURL_VERSION=1.8.0\n\nRUN apt update && apt install -y curl jq ca-certificates\nRUN curl -LO \"https://github.com/Orange-OpenSource/hurl/releases/download/${HURL_VERSION}/hurl_${HURL_VERSION}_amd64.deb\"\n# Use apt install to determine package dependencies instead of dpkg\nRUN apt -y install \"./hurl_${HURL_VERSION}_amd64.deb\"\nRUN rm -rf /var/lib/apt/lists/*\n\nCMD [\"hurl\"]\n```\n\nNote that the `HURL_VERSION` variable can be overridden by passing the variable and value into the container build job later. This setup intentionally avoids an automated script that always uses the [latest release](https://github.com/Orange-OpenSource/hurl/releases), preventing unexpected breakage and enforcing a controlled upgrade cycle for container images in production.\n\nOn GitLab.com SaaS, you can include the `Docker.gitlab-ci.yml` CI/CD template which automatically detects the `Dockerfile`, starts building the image using the shared runners, and pushes it to the [GitLab container registry](https://docs.gitlab.com/ee/user/packages/container_registry/). 
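\n\nAs noted above, the `HURL_VERSION` build argument can be overridden at image build time. With a plain Docker CLI invocation, this looks roughly like the following sketch (image tag illustrative):\n\n```sh\n# Pin a specific Hurl version by overriding the Dockerfile ARG\n$ docker build --build-arg HURL_VERSION=1.8.0 -t hurl-playground:1.8.0 .\n```\n\n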
For self-managed instances or your own runners on GitLab.com SaaS, it is recommended to decide whether to set up [Docker-in-Docker](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html), [Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html), Podman, or other container image build tools.\n\n```yaml\ninclude:\n  - template: Docker.gitlab-ci.yml\n```\n\nTo avoid running the Docker image build job every time, the job override definition configures it to [run manually](https://docs.gitlab.com/ee/ci/yaml/#when). You can also use rules to [choose when to run the job](https://docs.gitlab.com/ee/ci/jobs/job_control.html), for example only when a Git tag is pushed.\n\n```yaml\ninclude:\n  - template: Docker.gitlab-ci.yml\n\n# Change Docker build to manual non-blocking\ndocker-build:\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'\n      when: manual\n      allow_failure: true\n```\n\nOnce the container image is pushed to the registry, navigate into `Packages and Registries > Container Registries` and inspect the tagged image. Copy the image path for the latest tagged version and use it for the `image` attribute in the CI/CD job configuration.\n\n### Hurl container image in GitLab CI/CD example\n\nThe full example uses the previously built container image, and specifies the default `HURL_URL` variable. This can later be overridden by job definitions.\n\n_Please note that the image URL `registry.gitlab.com/everyonecancontribute/dev/hurl-playground:latest` is only used for demo purposes and not actively maintained or updated._\n\n```yaml\ninclude:\n  - template: Docker.gitlab-ci.yml\n\n# Change Docker build to manual non-blocking\ndocker-build:\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'\n      when: manual\n      allow_failure: true\n\n# Hurl job template\n.hurl-tmpl:\n  image: registry.gitlab.com/everyonecancontribute/dev/hurl-playground:latest\n  variables:\n    HURL_URL: gitlab.com\n\n# Hurl jobs that check websites\nhurl-dnsmichi-at:\n  extends: .hurl-tmpl\n  variables:\n    HURL_URL: dnsmichi.at\n  script:\n    - echo -e \"GET https://${HURL_URL}\\n\\nHTTP/1.1 200\" | hurl --test --color\n\nhurl-opsindev-news:\n  extends: .hurl-tmpl\n  variables:\n    HURL_URL: opsindev.news\n  script:\n    - echo -e \"GET https://${HURL_URL}\\n\\nHTTP/2 200\" | hurl --test --color\n```\n\nThe CI/CD configuration can further be optimized:\n\n- Create job templates that execute the same scripts and only differ in the `HURL_URL` variable.\n- Use Hurl configuration files that allow specifying variables on the CLI or as environment variables. More on this in the next section.\n\n## DevSecOps workflows with Hurl\n\nHurl allows users to describe HTTP instructions in a configuration file with the `.hurl` suffix. You can add the configuration files to Git, and review and approve changes in merge requests - with the changes run in CI/CD and reporting back any failures before merging.\n\nInspect the `use-cases/` directory in the [example project](https://gitlab.com/everyonecancontribute/dev/hurl-playground), and fork it to make changes, commit them, and run the CI/CD pipelines and reports. 
You can also clone the project and run the `tree` command in the terminal.\n\n```sh\n$ tree use-cases\nuse-cases\n├── dnsmichi.at.hurl\n├── gitlab-com-api.hurl\n├── gitlab-contribute.hurl\n└── hackernews.hurl\n```\n\nHurl supports the `--glob` option, which collects all configuration files matching a specific pattern.\n\n![Hurl configuration file run](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_multiple_config_files_run.png)\n\n### Chaining requests\n\nSimilar to CI/CD pipelines, jobs, and stages, testing HTTP endpoints with Hurl can require multiple steps. First, check that the website is reachable, and then try parsing expected results. Separating the requirements into two steps helps to analyze errors.\n\n- The HTTP endpoint is reachable, but the expected string is not in the response: The static website was changed, the REST API is missing a field, etc.\n- The HTTP endpoint is unreachable: Don’t try to understand why the follow-up tests fail.\n\nThe following example first sends a ping probe to the dev instance, and then checks the production environment in the second request.\n\n```sh\n$ vim use-cases/everyonecancontribute-com.hurl\n\nGET https://everyonecancontribute.dev\n\nHTTP/2 200\n\nGET https://everyonecancontribute.com\n\nHTTP/2 200\n$ hurl --test use-cases/everyonecancontribute-com.hurl\n```\n\nIn this scenario, the TLS certificate of the dev instance expired, and Hurl halts the test immediately.\n\n![Hurl chained requests, failing the first test with TLS certificate problems](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_chained_request_fail.png)\n\n### Hurl reports as JUnit test reports\n\nTreat website monitoring and web app tests as unit and end-to-end tests. The Hurl developers thought of that too - the CLI command provides different output options for the report: `--report-junit \u003Coutputpath>` integrates with [GitLab JUnit report](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html) support in merge requests and pipeline views.\n\nThe following configuration generates a JUnit report file at the path stored in the `HURL_JUNIT_REPORT` variable, which exists to avoid typing the path three times. The Hurl tests are run from the `use-cases/` directory using a glob pattern.\n\n```yaml\n# Hurl job template\n.hurl-tmpl:\n    image: registry.gitlab.com/everyonecancontribute/dev/hurl-playground:latest\n    variables:\n        HURL_URL: gitlab.com\n        HURL_JUNIT_REPORT: hurl_junit_report.xml\n\n# Hurl tests from configuration file, generating JUnit report integration in GitLab CI/CD\nhurl-report:\n    extends: .hurl-tmpl\n    script:\n      - hurl --test use-cases/*.hurl --report-junit $HURL_JUNIT_REPORT\n    after_script:\n      # Hack: Workaround for 'id' instead of 'name' in JUnit report from Hurl. https://gitlab.com/gitlab-org/gitlab/-/issues/299086\n      - sed -i 's/id/name/g' $HURL_JUNIT_REPORT\n    artifacts:\n      when: always\n      paths:\n        - $HURL_JUNIT_REPORT\n      reports:\n        junit: $HURL_JUNIT_REPORT\n```\n\nThe JUnit format returned by Hurl 1.8.0 defines the `id` attribute, but the GitLab JUnit integration expects the `name` attribute to be present. While writing this blog post, [the problem was discussed](https://github.com/Orange-OpenSource/hurl/issues/1067#issuecomment-1343264751) with the maintainers, and [the `name` attribute was implemented](https://github.com/Orange-OpenSource/hurl/issues/1078) and will be available in future releases. 
As a workaround with Hurl 1.8.0, the CI/CD [after_script](https://docs.gitlab.com/ee/ci/yaml/#after_script) section uses `sed` to replace the attributes after generating the report.\n\nThe [following example](https://gitlab.com/everyonecancontribute/dev/hurl-playground/-/merge_requests/10) fails on purpose by checking a different HTTP protocol version.\n\n```\nGET https://opsindev.news\n\n# This will fail on purpose\nHTTP/1.1 200\n\n[Asserts]\nbody contains \"Michael Friedrich\"\n```\n\n![Hurl test report in JUnit format integrated into GitLab](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_gitlab_junit_integration_merge_request_widget_overlay.png)\n\nOnce the JUnit integration with Hurl tests from a glob pattern works, you can continue adding new `.hurl` configuration files to the GitLab repository and start testing in MRs, which then enables review and approval workflows for production.\n\n### Web review apps\n\nWebsite monitoring is only one aspect of using Hurl: Testing web applications deployed in review environments in the cloud and in cloud-native clusters provides a native integration into [DevSecOps](https://about.gitlab.com/topics/devsecops/) workflows. The CI/CD pipelines fail when Hurl tests fail, and merge request widget reports provide more insights.\n\n[Cloud Seed](https://docs.gitlab.com/ee/cloud_seed/index.html) provides the ability to deploy a web application to a major cloud provider, for example Google Cloud. After the deployment is successful, additional CI/CD jobs can be configured that verify that the deployed web app version does not introduce a regression and provides all required data elements, API endpoints, etc. A similar workflow can be achieved by using review app environments with [webservers (Nginx, etc.), Docker, AWS, and Kubernetes](https://docs.gitlab.com/ee/ci/review_apps/#review-apps-examples). The review app [environment URL](https://docs.gitlab.com/ee/ci/environments/#create-a-dynamic-environment) is important for instrumenting the Hurl tests dynamically. The CI/CD variable [`CI_ENVIRONMENT_URL`](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) is available when `environment:url` is specified in the review app configuration.\n\nThe following example tests the review app for [this blog post when written in a merge request](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/115548):\n\n```yaml\n# Test review apps with hurl for this blog post.\nhurl-review-test:\n  extends: .review-environment # inherits the environment settings\n  needs: [uncategorized-build-and-review-deploy] # waits until the website (sites/uncategorized) is deployed\n  stage: test\n  rules: # YAML anchor that runs the job only on merge requests\n    - \u003C\u003C: *if-merge-request-original-repo\n  image:\n    name: ghcr.io/orange-opensource/hurl:1.8.0\n    entrypoint: [\"\"]\n  script:\n    - echo -e \"GET ${CI_ENVIRONMENT_URL}\\n\\nHTTP/1.1 200\" | hurl --test --color\n```\n\nThe environment is specified in the [.review-environment job template](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/91d6fd72a424a3d913e79ebc2aefb23bbab85863/.gitlab-ci.yml#L332) and used to [deploy the website review job](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/91d6fd72a424a3d913e79ebc2aefb23bbab85863/.gitlab-ci.yml#L532). 
The relevant configuration snippet is shown here:\n\n```yaml\n.review-environment:\n  variables:\n    DEPLOY_TYPE: review\n  environment:\n    name: review/$CI_COMMIT_REF_SLUG\n    url: https://$CI_COMMIT_REF_SLUG.about.gitlab-review.app\n    on_stop: review-stop\n    auto_stop_in: 30 days\n```\n\nThe deployment of the www-gitlab-com project [uses buckets in Google Cloud](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/91d6fd72a424a3d913e79ebc2aefb23bbab85863/scripts/deploy) that serve the website content in the review app. There are different types of web applications that require different deployment methods - as long as the environment URL variable is available in CI/CD and the deployment URL is accessible from the GitLab Runner executing the CI/CD job, you can continuously test web apps with Hurl!\n\n![Hurl test in GitLab CI/CD for review app environments](https://about.gitlab.com/images/blogimages/hurl-continuous-website-testing/hurl_gitlab_cicd_review_app_environment_tests_www-gitlab-com.png)\n\n## Development tips\n\nUse the [`--verbose` parameter](https://hurl.dev/docs/tutorial/debug-tips.html) to see the full request and response flow. Hurl also provides tips on which `curl` command could be run to fetch more data. This can be helpful when starting to use or develop a new REST API, or learning to understand the JSON structure of HTTP responses. Chaining the `curl` command with `jq` (the `curl ... | jq` pattern) can still be helpful to fetch data and build the HTTP tests in a second terminal or editor window.\n\n```sh\n$ curl -s 'https://gitlab.com/api/v4/projects' | jq\n$ curl -s 'https://gitlab.com/api/v4/projects' | jq -c '.[]' | jq\n\n{\"id\":41375401,\"description\":\"An example project for a GitLab pipeline.\",\"name\":\"Calculator\",\"name_with_namespace\":\"Iva Tee / Calculator\",\"path\":\"calculator\",\"path_with_namespace\":\"snufkins_hat/calculator\",\"created_at\":\"2022-11-26T00:32:33.825Z\",\"default_branch\":\"master\",\"tag_list\":[],\"topics\":[],\"ssh_url_to_repo\":\"git@gitlab.com:snufkins_hat/calculator.git\",\"http_url_to_repo\":\"https://gitlab.com/snufkins_hat/calculator.git\",\"web_url\":\"https://gitlab.com/snufkins_hat/calculator\",\"readme_url\":\"https://gitlab.com/snufkins_hat/calculator/-/blob/master/README.md\",\"avatar_url\":null,\"forks_count\":0,\"star_count\":0,\"last_activity_at\":\"2022-11-26T00:32:33.825Z\",\"namespace\":{\"id\":58849237,\"name\":\"Iva Tee\",\"path\":\"snufkins_hat\",\"kind\":\"user\",\"full_path\":\"snufkins_hat\",\"parent_id\":null,\"avatar_url\":\"https://secure.gravatar.com/avatar/a3efe834950275380d5f19c9b17c922c?s=80&d=identicon\",\"web_url\":\"https://gitlab.com/snufkins_hat\"}}\n```\n\nThe GitLab projects API returns an array of elements, where we can inspect the `id` and `name` attributes for a simple test - the first element’s name must not be empty, and the second element’s id needs to be greater than 0.\n\n```sh\n$ vim gitlab-com-api.hurl\n\nGET https://gitlab.com/api/v4/projects\n\nHTTP/2 200\n\n[Asserts]\njsonpath \"$[0].name\" != \"\"\njsonpath \"$[1].id\" > 0\n\n$ hurl --test gitlab-com-api.hurl\n\ngitlab-com-api.hurl: Running [1/1]\ngitlab-com-api.hurl: Success (1 request(s) in 728 ms)\n--------------------------------------------------------------------------------\nExecuted files:  1\nSucceeded files: 1 (100.0%)\nFailed files:    0 (0.0%)\nDuration:        730 ms\n```\n\n## More use cases\n\n- Work with HTTP sessions and [cookies](https://hurl.dev/docs/request.html#cookies), test [forms with 
parameters](https://hurl.dev/docs/request.html#form-parameters).\n- Review existing API tests of your applications.\n- Build advanced chained workflows with GET, POST, PUT, DELETE, and more HTTP methods.\n- Integrate simple ping/HTTP monitoring health checks into the DevSecOps Platform using alerts and incident management.\n\nIf the Hurl checks cannot be integrated directly inside the project where the application is developed and deployed, another idea could be to create a standalone GitLab project that has CI/CD pipeline schedules enabled. It can continuously run the Hurl tests, parse the reports or trigger an event when the pipeline fails, and [create an alert](https://docs.gitlab.com/ee/operations/incident_management/alerts.html) by sending a JSON payload from the Hurl results to the [HTTP endpoint](https://docs.gitlab.com/ee/operations/incident_management/integrations.html#single-http-endpoint). Developers can send MRs to update the Hurl tests, and maintainers review and approve the new test suites being rolled out into production. Alternatively, move the complete CI/CD configuration into a group/project with different permissions, and specify the CI/CD configuration as a remote URL in the web application project. This compliance level helps to control who can make changes to important tests and CI/CD configuration.\n\nHurl supports the `--json` parameter to return only the JSON-formatted test result, which helps build your own custom reports and integrations.\n\n```sh\n$ echo -e \"GET https://about.gitlab.com/teamops/\\n\\nHTTP/2 200\" | hurl --json | jq\n```\n\nFor folks in DevRel, monitoring certain websites for keywords, or checking whether API values exceed a certain threshold, can be interesting. Here is an example for monitoring Hacker News using the Algolia search API, inspired by the [Zapier integration used for GitLab Slack](/handbook/marketing/developer-relations/workflows-tools/zapier/#zaps-for-hacker-news). The `QueryStringParams` section allows users to define the query parameters as a readable list, which is easier to modify. The `jsonpath` assert searches for the `hits` key and checks that its count is zero (not being on the Hacker News front page means OK in this example).\n\n```\n$ vim hackernews.hurl\n\nGET https://hn.algolia.com/api/v1/search\n[QueryStringParams]\nquery: gitlab\n#query: hurl\ntags: front_page\n\nHTTP/2 200\n\n[Asserts]\njsonpath \"$.hits\" count == 0\n\n$ hurl --test hackernews.hurl\n```\n\n## Limitations\n\nHurl works great for testing websites and web applications that serve static content: sending different HTTP request types, data, etc., and ensuring that responses match expectations. Compared to other end-to-end testing solutions (Selenium, etc.), Hurl does not provide a JavaScript engine and can only parse the raw DOM or JSON response. It does not support a DOM managed and rendered by JavaScript front-end frameworks. UI integration tests also need to be performed with different tools, similar to full end-to-end test workflows. Other examples are [accessibility testing](https://docs.gitlab.com/ee/ci/testing/accessibility_testing.html) and [browser performance testing](https://docs.gitlab.com/ee/ci/testing/browser_performance_testing.html). If you are curious how end-to-end testing is done for GitLab, the product, peek into the [development documentation](https://docs.gitlab.com/ee/development/testing_guide/end_to_end/).\n\n## Conclusion\n\nHurl provides an easy way to test HTTP endpoints (such as websites and APIs) in a fast and reliable way. 
The CLI commands can be integrated into CI/CD workflows, and the configuration syntax and files provide a single source of truth for everything. Additional support for JUnit report formats ensures that website testing is fully integrated into the [DevSecOps](https://about.gitlab.com/topics/devsecops/) platform, and increases visibility and extensibility for test automation and monitoring. There are known limitations with dynamic JavaScript websites and advanced UI/end-to-end testing workflows.\n\nHurl is open source, [created and maintained by Orange](https://opensource.orange.com/en/open-source-orange/), and written in Rust. This blog post inspired contributions to the [Debian/Ubuntu installation documentation](https://github.com/Orange-OpenSource/hurl/pull/1084) and [default issue templates](https://github.com/Orange-OpenSource/hurl/pull/1083).\n\n**Tip:** Practice using Hurl on the command line, and remember it when the next production incident shows a strange API behavior with POST requests.\n\nThanks to [Lee Tickett](/company/team/#leetickett-gitlab) who inspired me to test Hurl in GitLab CI/CD and write this blog post after seeing huge interest in a [Twitter share](https://twitter.com/dnsmichi/status/1595820546062778369).\n\nCover image by [Aaron Burden](https://unsplash.com/@aaronburden) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1328,832,894],{"slug":2115,"featured":6,"template":678},"how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd","content:en-us:blog:how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd.yml","How To Continously Test Web Apps Apis With Hurl And Gitlab Ci Cd","en-us/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd.yml","en-us/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd",{"_path":2121,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2122,"content":2128,"config":2134,"_id":2136,"_type":16,"title":2137,"_source":17,"_file":2138,"_stem":2139,"_extension":20},"/en-us/blog/new-default-container-image-gitlab-saas-linux-runnners",{"title":2123,"description":2124,"ogTitle":2123,"ogDescription":2124,"noIndex":6,"ogImage":2125,"ogUrl":2126,"ogSiteName":692,"ogType":693,"canonicalUrls":2126,"schema":2127},"Using Ruby 3.1 as default on GitLab SaaS Linux runners","Learn about the new image and how to ensure CI job compatibility.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670766/Blog/Hero%20Images/container-reg-cdn-blog.jpg","https://about.gitlab.com/blog/new-default-container-image-gitlab-saas-linux-runnners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Ruby 3.1 as the default container image on GitLab SaaS Runners on Linux\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2022-12-13\",\n      }",{"title":2129,"description":2124,"authors":2130,"heroImage":2125,"date":2131,"body":2132,"category":14,"tags":2133},"How to use Ruby 3.1 as the default container image on GitLab SaaS Runners on Linux",[1544],"2022-12-13","\nOn January 12, 2023, we will change the [default container](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html) image used on GitLab SaaS Runners on Linux from Ruby 2.5, which is end of life, to Ruby 3.1.\n\nIf you have specified a container image in your CI/CD job, then there is no impact to you. 
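\n\nFor example, a job that already pins its own image, as in this illustrative snippet, keeps using exactly that image:\n\n```\ntest-job:\n  # This job is unaffected by the default image change\n  image: ruby:3.0\n  script:\n    - ruby --version\n```\n\n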
In other words, your GitLab SaaS CI/CD job will only run in the default container if no image is set for the job in the `.gitlab-ci.yml` pipeline file.\n\nTo check, open the log view of a CI job and note the image used. For example, if you have not added an image to your CI job on GitLab SaaS, then the job log will have the following:\n\n```\nUsing Docker executor with image ruby:2.5 ...\n```\n\nIf you have not set a container image in your CI job, then after this change, the job will run in a Ruby 3.1 container.\n\n## How can I check for any build issues on Ruby 3.1?\n\nWhile a CI/CD job that runs on Ruby 2.5 is not expected to be incompatible with Ruby 3.1, you can check by configuring the job to run in a Ruby 3.1 container. To do so, edit the `.gitlab-ci.yml` and add the following:\n\n```\ndefault:\n  image: ruby:3.1\n```\n\n## Future plans\n\nIn addition to this change, we plan to [define](https://gitlab.com/gitlab-org/gitlab/-/issues/384992) a new container image maintenance process for GitLab SaaS Runners on Linux. The new policy aims to ensure that the default image used is updated so that it contains the latest security fixes.\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\n",[894,915,832,937,232],{"slug":2135,"featured":6,"template":678},"new-default-container-image-gitlab-saas-linux-runnners","content:en-us:blog:new-default-container-image-gitlab-saas-linux-runnners.yml","New Default Container Image Gitlab Saas Linux Runnners","en-us/blog/new-default-container-image-gitlab-saas-linux-runnners.yml","en-us/blog/new-default-container-image-gitlab-saas-linux-runnners",{"_path":2141,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2142,"content":2148,"config":2154,"_id":2156,"_type":16,"title":2157,"_source":17,"_file":2158,"_stem":2159,"_extension":20},"/en-us/blog/top-10-technical-articles-of-2022",{"title":2143,"description":2144,"ogTitle":2143,"ogDescription":2144,"noIndex":6,"ogImage":2145,"ogUrl":2146,"ogSiteName":692,"ogType":693,"canonicalUrls":2146,"schema":2147},"Top 10 technical articles of 2022","Let’s review our fantastic year of how-to guides. From fixing failed pipelines to making the best use of GitOps, we have you covered with our in-depth tutorials.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663736/Blog/Hero%20Images/a-deep-dive-into-the-security-analyst-persona.jpg","https://about.gitlab.com/blog/top-10-technical-articles-of-2022","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 technical articles of 2022\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-12-08\",\n      }",{"title":2143,"description":2144,"authors":2149,"heroImage":2145,"date":2151,"body":2152,"category":14,"tags":2153},[2150],"Valerie Silverthorne","2022-12-08","\nWith 2022 coming to a close, we wanted to ensure everyone gets one more chance to explore our top 10 technical blog posts of the year. 
Roll up your sleeves and enjoy our most-viewed how-to articles and don’t forget to bookmark them for next year!\n\n## 1. Failed pipeline? \n\nWe have *all* been there, and not much is more frustrating than that red X. Staff Developer Evangelist [Brendan O’Leary](/company/team/#brendan) offers his best advice on troubleshooting the “why?” of a GitLab failed pipeline – it starts with keeping the right perspective. So many factors are involved in code development that it’s critical to ask all of the questions: Is it the code? Is it the test? Is it a vulnerability, etc.?\n\n[How to troubleshoot a GitLab pipeline failure](/blog/how-to-troubleshoot-a-gitlab-pipeline-failure/)\n\n## 2. Why Git Rebase is your BFF\n\nWith code review increasingly important to successful DevOps, Senior Backend Engineer (Gitaly) [Christian Couder](/company/team/#chriscool) thinks devs might be forgetting a secret weapon in their IDE: Git Rebase. Learn how to rework commits with Git Rebase, including expert tips to try different instructions like ‘reword’, ‘edit’, and ‘squash’.\n\n[Take advantage of Git Rebase](/blog/take-advantage-of-git-rebase/)\n\n## 3. Alert fatigue is real\n\nFollow along with Senior Site Reliability Engineer [Steve Azzopardi](/company/team/#steveazz) as he lays out a GitLab investigation into annoying, time-consuming (and customer-facing) 502 errors in the GitLab Pages logs. To uncover the problem, Azzopardi and team had to unearth some red herrings along the way, but ultimately discovered the importance of PID 1 in a container.\n\n[How we reduced 502 errors by caring about PID 1 in containers](/blog/how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes/)\n\n## 4. More pipelines = less complexity\n\nCI/CD is at the heart of most modern DevOps practices, but that doesn’t mean it’s a “set it and forget it.” Staff Backend Engineer Fabio Pittino acknowledges the complexity challenges of CI/CD and suggests the solution is choosing the right pipelines for the job. Understand the differences between parent-child and multi-project pipelines to streamline your CI/CD efforts.\n\n[Breaking down CI/CD complexity with parent-child and multi-project pipelines](/blog/parent-child-vs-multi-project-pipelines/)\n\n## 5. Hacking and bug bounties\n\nHow did a Swedish web developer go from zero to number seven on our HackerOne Top 10 list in just over a year? Johan Carlsson offers a detailed look at how and why he started looking for bugs in GitLab in his spare time, and how others can jump into hacking, too.\n\n[Want to start hacking? Here’s how to quickly dive in](/blog/cracking-our-bug-bounty-top-10/)\n\n## 6. GitLab… on an iPad\n\nYes, you can code on an M1-chip-based iPad, and Staff Developer Evangelist Brendan O’Leary walks through all the necessary steps to get GitLab running using GitPod.\n\n[How to code, build, and deploy from an iPad using GitLab and GitPod](/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod/)\n\n## 7. Speed up database changes\n\nMany DevOps teams have mastered speedy application code changes but have struggled to make database updates equally streamlined. In this step-by-step guide, you’ll learn how to apply DevOps principles to database change management.\n\n[How to bring DevOps to the database with GitLab and Liquibase](/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase/)\n\n## 8. 
A primer on IaC security\n\nInfrastructure as Code (IaC) is an increasingly popular solution for DevOps teams, and with good reason: It’s an efficient and low-resource solution. But, as Senior Developer Evangelist [Michael Friedrich](/company/team/#dnsmichi) explains, it’s also rife with potential security vulnerabilities. Friedrich takes an exhaustive look at the threats, tools, integrations, and strategies that make IaC a safer choice.\n\n[Fantastic Infrastructure as Code security attacks and how to find them](/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them/)\n\n## 9. Everything you need to know about GitOps \n\nWant to know how to make GitLab work with GitOps? Senior Product Manager (Configure) [Viktor Nagy](/company/team/#nagyv-gitlab) created an eight-part tutorial covering everything GitLab and GitOps, culminating in how to make a GitLab agent for Kubernetes self-managing. \n\n[The ultimate guide to GitOps with GitLab](/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n\n## 10. The skinny on static site generators\n\nDevs will get the most out of GitLab Pages by choosing the right static site generator (SSG). Developer Evangelist [Fatima Sarah Khalid](/company/team/#sugaroverflow) reviews six options and has also created a toolkit to help make the SSG evaluation process easier.\n\n[How to choose the right static site generator](/blog/comparing-static-site-generators/)\n\n",[894,726,702],{"slug":2155,"featured":6,"template":678},"top-10-technical-articles-of-2022","content:en-us:blog:top-10-technical-articles-of-2022.yml","Top 10 Technical Articles Of 2022","en-us/blog/top-10-technical-articles-of-2022.yml","en-us/blog/top-10-technical-articles-of-2022",{"_path":2161,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2162,"content":2168,"config":2174,"_id":2176,"_type":16,"title":2177,"_source":17,"_file":2178,"_stem":2179,"_extension":20},"/en-us/blog/how-we-diagnosed-and-resolved-redis-latency-spikes",{"title":2163,"description":2164,"ogTitle":2163,"ogDescription":2164,"noIndex":6,"ogImage":2165,"ogUrl":2166,"ogSiteName":692,"ogType":693,"canonicalUrls":2166,"schema":2167},"How we diagnosed and resolved Redis latency spikes with BPF and other tools","How we uncovered a three-phase cycle involving two distinct saturation points and a simple fix to break that cycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667913/Blog/Hero%20Images/clocks.jpg","https://about.gitlab.com/blog/how-we-diagnosed-and-resolved-redis-latency-spikes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we diagnosed and resolved Redis latency spikes with BPF and other tools\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matt Smiley\"}],\n        \"datePublished\": \"2022-11-28\",\n      }",{"title":2163,"description":2164,"authors":2169,"heroImage":2165,"date":2171,"body":2172,"category":14,"tags":2173},[2170],"Matt Smiley","2022-11-28","\n\nIf you enjoy performance engineering and peeling back abstraction layers to ask underlying subsystems to explain themselves, this article’s for you. The context is a chronic Redis latency problem, and you are about to tour a practical example of using BPF and profiling tools in concert with standard metrics to reveal unintuitive behaviors of a complex system.\n\nBeyond the tools and techniques, we also use an iterative hypothesis-testing approach to compose a behavior model of the system dynamics. 
This model tells us what factors influence the problem's severity and triggering conditions.\n\nUltimately, we find the root cause, and its remedy is delightfully boring and effective. We uncover a three-phase cycle involving two distinct saturation points and a simple fix to break that cycle. Along the way, we inspect aspects of the system’s behavior using stack sampling profiles, heat maps and flamegraphs, experimental tuning, source and binary analysis, instruction-level BPF instrumentation, and targeted latency injection under specific entry and exit conditions.\n\nIf you are short on time, the takeaways are summarized at the end. But the journey is the fun part, so let's dig in!\n\n## Introducing the problem: Chronic latency \n\nGitLab makes extensive use of Redis, and, on GitLab.com SaaS, we use [separate Redis clusters](/handbook/engineering/infrastructure/production/architecture/#redis-architecture) for certain functions. This tale concerns a Redis instance acting exclusively as a least recently used (LRU) cache.\n\nThis cache had a chronic latency problem that started occurring intermittently over two years ago and in recent months had become significantly worse: Every few minutes, it suffered from bursts of very high latency and a corresponding throughput drop, eating into its Service Level Objective (SLO). These latency spikes impacted user-facing response times and [burned error budgets](https://gitlab.com/gitlab-org/gitlab/-/issues/360578#note_966597336) for dependent features, and this is what we aimed to solve.\n\n**Graph:** Spikes in the rate of extremely slow (> 1 second) Redis requests, each corresponding to an eviction burst\n\n![Graph showing spikes in the slow request rate every few minutes](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/00_redis_slow_request_rate_spikes_during_each_eviction_burst.png)\n\nIn prior work, we had already completed several mitigating optimizations. These sufficed for a while, but organic growth had resurfaced this as an important [long-term scaling problem](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#why-is-it-important-to-get-to-the-root-of-the-latency-spikes). We had also already ruled out externally triggered causes, such as request floods, connection rate spikes, and host-level resource contention. These latency spikes were consistently associated with memory usage reaching the eviction threshold (`maxmemory`), not with changes in client traffic patterns or other processes competing with Redis for CPU time, memory bandwidth, or network I/O.\n\nWe [initially thought](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1567) that Redis 6.2’s new [eviction throttling mechanism](https://github.com/redis/redis/pull/7653) might alleviate our eviction burst overhead. It did not. That mechanism solves a different problem: It prevents a stall condition where a single call to `performEvictions` could run arbitrarily long. 
In contrast, during this analysis we [discovered](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_977816216) that our problem (both before and after upgrading Redis) was related to numerous calls collectively reducing Redis throughput, rather than a few extremely slow calls causing a complete stall.\n\nTo discover our bottleneck and its potential solutions, we needed to investigate Redis’s behavior during our workload’s eviction bursts.\n\n## A little background on Redis evictions\n\nAt the time, our cache was oversubscribed, trying to hold more cache keys than would fit under the [configured `maxmemory` threshold](https://redis.io/docs/reference/eviction/), so evictions from the LRU cache were expected. But the dense concentration of that eviction overhead was surprising and troubling.\n\nRedis is essentially single-threaded. With a few exceptions, the “main” thread does almost all tasks serially, including handling client requests and evictions. Spending more time on X means there is less remaining time to do Y, so think about queuing behavior as the story unfolds.\n\nWhenever Redis reaches its `maxmemory` threshold, it frees memory by evicting some keys, aiming to do just enough evictions to get back under `maxmemory`. However, contrary to expectation, the metrics for memory usage and eviction rate (shown below) indicated that instead of a continuous steady eviction rate, there were abrupt burst events that freed much more memory than expected. After each eviction burst, no evictions occurred until memory usage climbed back up to the `maxmemory` threshold again.\n\n**Graph:** Redis memory usage drops by 300-500 MB during each eviction burst:\n\n![Memory usage repeatedly rises gradually to 64 GB and then abruptly drops](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/01_redis_memory_usage_dips_during_eviction_bursts.png)\n\n**Graph:** Key eviction spikes match the timing and size of the memory usage dips shown above\n\n![Eviction counter shows a large spike each time the previous graph showed a large memory usage drop](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/02_redis_eviction_bursts.png)\n\nThis apparent excess of evictions became the central mystery. Initially, we thought answering that question might reveal a way to smooth the eviction rate, spreading out the overhead and avoiding the latency spikes. Instead, we discovered that these bursts are an interaction effect that we need to avoid, but more on that later.\n\n## Eviction bursts cause CPU saturation\n\nAs shown above, we had found that these latency spikes correlated perfectly with large spikes in the cache’s eviction rate, but we did not yet understand why the evictions were concentrated into bursts that lasted a few seconds and occurred every few minutes.\n\nAs a first step, we wanted to verify a causal relationship between eviction bursts and latency spikes.\n\nTo test this, we used [`perf`](https://www.brendangregg.com/perf.html) to run a CPU sampling profile on the Redis main thread. Then we applied a filter to split that profile, isolating the samples where it was calling the [`performEvictions` function](https://github.com/redis/redis/blob/6.2.6/src/evict.c#L512). 
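\n\nAs a rough sketch of that capture step (a minimal approximation, not the exact commands from the investigation - those are linked in the walkthrough below; the sampling rate and duration here are assumptions):\n\n```\n# Sample stacks on the Redis main thread at 99 Hz for 60 seconds.\n# The main thread's TID equals the process PID, so pgrep suffices here.\nsudo perf record --freq 99 -g --tid $( pgrep -o redis-server ) -- sleep 60\n\n# Dump the samples as text, ready for filtering by stack contents\n# (e.g. whether performEvictions appears) and for loading into flamescope\nsudo perf script --header > redis-main-thread.stacks\n```\n\n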
Using [`flamescope`](https://github.com/Netflix/flamescope), we can visualize the profile’s CPU usage as a [subsecond offset heat map](https://www.brendangregg.com/HeatMaps/subsecondoffset.html), where each second on the X axis is folded into a column of 20 msec buckets along the Y axis. This visualization style highlights sub-second activity patterns. Comparing these two heat maps confirmed that during an eviction burst, `performEvictions` is starving all other main thread code paths for CPU time.\n\n**Graph:** Redis main thread CPU time, excluding calls to `performEvictions`\n\n![Heat map shows one large gap and two small gaps in an otherwise uniform pattern of 70 percent to 80 percent CPU usage](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/03_heat_map_of_redis_main_thread_during_eviction_burst__excluding_performEvictions.png)\n\n**Graph:** Remainder of the same profile, showing only the calls to `performEvictions`\n\n![This heat map shows the gaps in the previous heat map were CPU time spent performing evictions](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/04_heat_map_of_redis_main_thread_during_eviction_burst__only_performEvictions.png)\n\nThese results confirm that eviction bursts are causing CPU starvation on the main thread, which acts as a throughput bottleneck and increases Redis’s response latency. These CPU utilization bursts typically lasted a few seconds, so they were too short-lived to trigger alerts but were still user impacting.\n\nFor context, the following flamegraph shows where `performEvictions` spends its CPU time. There are a few interesting things here, but the most important takeaways are:\n* It gets called synchronously by `processCommand` (which handles all client requests).\n* It handles many of its own deletes. Despite its name, the `dbAsyncDelete` function only delegates deletes to a helper thread under certain conditions, which turn out to be rare for this workload.\n\n![Flamegraph of calls to function performEvictions, as described above](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/05_flamegraph_of_redis_main_thread_during_eviction_burst__only_performEvictions.png)\n\nFor more details on this analysis, see the [walkthrough and methodology](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_854745083).\n\n## How fast are individual calls to `performEvictions`?\n\nEach incoming request to Redis is handled by a call to `processCommand`, and it always concludes by calling the `performEvictions` function. That call to `performEvictions` is frequently a no-op, returning immediately after checking that the `maxmemory` threshold has not been breached. But when the threshold is exceeded, it will continue evicting keys until it either reaches its `mem_tofree` goal or exceeds its configured time limit per call.\n\nThe CPU heat maps shown earlier proved that `performEvictions` calls were collectively consuming a large majority of CPU time for up to several seconds.\n\nTo complement that, we also measured the wall clock time of individual calls.\n\nUsing the `funclatency` CLI tool (part of the [BCC suite of BPF tools](https://github.com/iovisor/bcc)), we measured call duration by instrumenting entry and exit from the `performEvictions` function and aggregated those measurements into a [histogram](https://en.wikipedia.org/wiki/Histogram) at 1-second intervals. 
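\n\nUnder the hood, that instrumentation amounts to a pair of uprobes feeding a BPF histogram map. A roughly equivalent `bpftrace` one-liner might look like this (a sketch for illustration only - `funclatency` is what we actually ran - and it assumes the `performEvictions` symbol is resolvable in the binary):\n\n```\nsudo bpftrace -e '\nuprobe:/opt/gitlab/embedded/bin/redis-server:performEvictions {\n  @entry[tid] = nsecs;                          // timestamp each call on entry\n}\nuretprobe:/opt/gitlab/embedded/bin/redis-server:performEvictions /@entry[tid]/ {\n  @usecs = hist((nsecs - @entry[tid]) / 1000);  // bucket the call duration in microseconds\n  delete(@entry[tid]);\n}\ninterval:s:1 { print(@usecs); clear(@usecs); }  // emit and reset the histogram every second\n'\n```\n\n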
When no evictions were occurring, the calls were consistently low latency (4-7 usecs/call). This is the no-op case described above (including 2.5 usecs/call of instrumentation overhead). But during an eviction burst, the results shift to a bimodal distribution, including a combination of the fast no-op calls along with much slower calls that are actively performing evictions:\n\n```\n$ sudo funclatency-bpfcc --microseconds --timestamp --interval 1 --duration 600 --pid $( pgrep -o redis-server ) '/opt/gitlab/embedded/bin/redis-server:performEvictions'\n...\n23:54:03\n     usecs               : count     distribution\n         0 -> 1          : 0        |                                        |\n         2 -> 3          : 576      |************                            |\n         4 -> 7          : 1896     |****************************************|\n         8 -> 15         : 392      |********                                |\n        16 -> 31         : 84       |*                                       |\n        32 -> 63         : 62       |*                                       |\n        64 -> 127        : 94       |*                                       |\n       128 -> 255        : 182      |***                                     |\n       256 -> 511        : 826      |*****************                       |\n       512 -> 1023       : 750      |***************                         |\n```\n\nThis measurement also directly confirmed and quantified the throughput drop in Redis requests handled per second: The call rate to `performEvictions` (and hence to `processCommand`) dropped to 20% of its norm from before the evictions began, from 25K to 5K calls per second.\n\nThis has a huge impact on clients: New requests are arriving at 5x the rate they are being completed. And crucially, we will see soon that this asymmetry is what drives the eviction burst.\n\nFor more details on this analysis, see the [safety check](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_857869826) for instrumentation overhead and the [results walkthrough](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_857907521). And for more general reference, the BPF instrumentation overhead estimate is based on these [benchmark results](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1383).\n\n## Experiment: Can tuning mitigate eviction-driven CPU saturation?\n\nThe analyses so far had shown that evictions were severely starving the Redis main thread for CPU time. There were still important unanswered questions (which we will return to shortly), but this was already enough info to [suggest some experiments](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_859236777) to test potential mitigations:\n* Can we spread out the eviction overhead so it takes longer to reach its goal but consumes a smaller percentage of the main thread’s time?\n* Are evictions freeing more memory than expected due to scheduling a lot of keys to be asynchronously deleted by the [lazyfree mechanism](https://github.com/redis/redis/blob/6.2.6/redis.conf#L1079)? Lazyfree is an optional feature that lets the Redis main thread [delegate to an async helper thread](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_859236777) the expensive task of deleting keys that have more than 64 elements. 
These async evictions do not count immediately towards the eviction loop’s memory goal, so if many keys qualify for lazyfree, this could potentially drive many extra iterations of the eviction loop.\n\nThe [answers](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7172#note_971197943) to both turned out to be no:\n* Reducing `maxmemory-eviction-tenacity` to its minimum setting still did not make `performEvictions` cheap enough to avoid accumulating a request backlog. It did increase response rate, but arrival rate still far exceeded it, so this was not an effective mitigation.\n* Disabling `lazyfree-lazy-eviction` did not prevent the eviction burst from dropping memory usage far below `maxmemory`. Those lazyfrees represent a small percentage of reclaimed memory. This rules out one of the potential explanations for the mystery of excessive memory being freed.\n\nHaving ruled out two potential mitigations and one candidate hypothesis, at this point we return to the pivotal question: Why are several hundred extra megabytes of memory being freed by the end of each eviction burst?\n\n## Why do evictions occur in bursts and free too much memory?\n\nEach round of eviction aims to free just barely enough memory to get back under the `maxmemory` threshold.\n\nWith a steady rate of demand for new memory allocations, the eviction rate should be similarly steady. The rate of arriving cache writes does appear to be steady. So why are evictions happening in dense bursts, rather than smoothly? And why do they reduce memory usage on a scale of hundreds of megabytes rather than hundreds of bytes?\n\nSome potential explanations to explore:\n* Do evictions only end when a large key gets evicted, spontaneously freeing enough memory to skip evictions for a while? No, the memory usage drop is far bigger than the largest keys in the dataset.\n* Do deferred lazyfree evictions cause the eviction loop to overshoot its goal, freeing more memory than intended? No, the above experiment disproved this hypothesis.\n* Is something causing the eviction loop to sometimes calculate an unexpectedly large value for its `mem_tofree` goal? We explore this next. The answer is no, but checking it led to a new insight.\n* Is a feedback loop causing evictions to become somehow self-amplifying? If so, what conditions lead to entering and leaving this state? This turned out to be correct.\n\nThese were all plausible and testable hypotheses, and each would point towards a different solution to the eviction-driven latency problem.\n\nThe first two hypotheses we have already eliminated.\n\nTo test the next two, we built custom BPF instrumentation to peek at the calculation of `mem_tofree` at the start of each call to `performEvictions`.\n\n## Observing the `mem_tofree` calculation with `bpftrace`\n\nThis part of the investigation was a personal favorite and led to a critical realization about the nature of the problem.\n\nAs noted above, our two remaining hypotheses were:\n* an unexpectedly large `mem_tofree` goal\n* a self-amplifying feedback loop\n\nTo differentiate between them, we used [`bpftrace`](https://github.com/iovisor/bpftrace) to instrument the calculation of `mem_tofree`, looking at its input variables and results.\n\nThis set of measurements directly tests the following:\n* Does each call to `performEvictions` aim to free a small amount of memory -- perhaps roughly the size of an average cache entry? 
If `mem_tofree` ever approaches hundreds of megabytes, that would confirm the first hypothesis and reveal what part of the calculation was causing that large value. Otherwise, it rules out the first hypothesis and makes the feedback loop hypothesis more likely.\n* Does the replication buffer size significantly influence `mem_tofree` as a feedback mechanism? Each eviction adds to this buffer, just like normal writes do. If this buffer grows large (possibly partly due to evictions) and then abruptly shrinks (due to the peer consuming it), that would cause a spontaneous large drop in memory usage, instantly ending evictions. This is one potential way for evictions to drive a feedback loop.\n\nTo peek at the values of the `mem_tofree` calculation ([script](https://gitlab.com/gitlab-com/gl-infra/scalability/uploads/cab2cd03231f8dd4819f77b44d768cb9/redis_snoop.getMaxmemoryState.sha_25a228b839a93a1395907a03f83e1eee448b0f14.production_thresholds.bt)), we needed to isolate the [correct call from `performEvictions`](https://github.com/redis/redis/blob/6.2.6/src/evict.c#L523) to the [`getMaxmemoryState`](https://github.com/redis/redis/blob/6.2.6/src/evict.c#L374-L407) function and reverse engineer its assembly to find the right instruction and register to instrument for each of the source code level variables that we wanted to capture. From that data we generate histograms for each of the following variables:\n\n```\nmem_reported = zmalloc_used_memory()        // All used memory tracked by jemalloc\noverhead = freeMemoryGetNotCountedMemory()  // Replication output buffers + AOF buffer\nmem_used = mem_reported - overhead          // Non-exempt used memory\nmem_tofree = mem_used - maxmemory           // Eviction goal\n```\n\n_Caveat:_ Our [custom BPF instrumentation](https://gitlab.com/gitlab-com/gl-infra/scalability/uploads/cab2cd03231f8dd4819f77b44d768cb9/redis_snoop.getMaxmemoryState.sha_25a228b839a93a1395907a03f83e1eee448b0f14.production_thresholds.bt) is specific to this particular build of the `redis-server` binary, since it attaches to virtual addresses that are likely to change the next time Redis is compiled. But the approach generalizes. Treat this as a concrete example of using BPF to inspect source code variables in the middle of a function call without having to rebuild the binary. Because we are peeking at the function’s intermediate state and because the compiler inlined this function call, we needed to do binary analysis to find the correct instrumentation points. In general, peeking at a function’s arguments or return value is easier and more portable, but in this case it would not suffice.\n\nThe results:\n* Ruled out the first hypothesis: Each call to `performEvictions` had a small target value (`mem_tofree` \u003C 2 MB). This means each call to `performEvictions` did a small amount of work. Redis’s mysterious rapid drop in memory usage cannot have been caused by an abnormally large `mem_tofree` target evicting a big batch of keys all at once. Instead, there must be many calls collectively driving down memory usage.\n* The replication output buffers remained consistently small, ruling out one of the potential feedback loop mechanisms.\n* Surprisingly, `mem_tofree` was usually 16 KB to 64 KB, which is larger than a typical cache entry. 
This size discrepancy hints that cache keys may not be the main source of the memory pressure perpetuating the eviction burst once it begins.\n\nAll of the above results were consistent with the feedback loop hypothesis.\n\nIn addition to answering the initial questions, we got a bonus outcome: Concurrently measuring both `mem_tofree` and `mem_used` revealed a crucial new fact – _the memory reclaim is a completely distinct phase from the eviction burst_.\n\nReframing the pathology as exhibiting separate phases for evictions versus memory reclaim led to a series of realizations, described in the next section. From that emerged a coherent hypothesis explaining all the observed properties of the pathology.\n\nFor more details on this analysis, see [methodology notes](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982498636), [build notes](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982499538) supporting the disassembly of the Redis binary, and [initial interpretations](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_977994182).\n\n## Three-phase cycle\n\nWith the above results indicating a distinct separation between the evictions and the memory reclaim, we can now concisely characterize [three phases](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982623949) in the cycle of eviction-driven latency spikes.\n\n**Graph:** Diagram (not to scale) comparing memory and CPU usage to request and response rates during each of the three phases\n\n![Diagram summarizes the text that follows, showing CPU and memory saturate in Phase 2 until request rate drops to match response rate, after which they recover](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/06_3_phase_cycle_of_eviction_bursts.png)\n\nPhase 1: Not saturated (7-15 minutes)\n* Memory usage is below `maxmemory`. No evictions occur during this phase.\n* Memory usage grows organically until reaching `maxmemory`, which starts the next phase.\n\nPhase 2: Saturated memory and CPU (6-8 seconds)\n* When memory usage reaches `maxmemory`, evictions begin.\n* Evictions occur only during this phase, and they occur intermittently and frequently.\n* Demand for memory frequently exceeds free capacity, repeatedly pushing memory usage above `maxmemory`. Throughout this phase, memory usage oscillates close to the `maxmemory` threshold, evicting a small amount of memory at a time, just enough to get back under `maxmemory`.\n\nPhase 3: Rapid memory reclaim (30-60 seconds)\n* No evictions occur during this phase.\n* During this phase, something that had been holding a lot of memory starts quickly and steadily releasing it.\n* Without the overhead of running evictions, CPU time is again spent mostly on handling requests (starting with the backlog that accumulated during Phase 2).\n* Memory usage drops rapidly and steadily. By the time this phase ends, hundreds of megabytes have been freed. 
Afterwards, the cycle restarts with Phase 1.\n\nAt the transition between Phase 2 and Phase 3, evictions abruptly end because memory usage stays below the `maxmemory` threshold.\n\nReaching that transition point where memory pressure becomes negative signals that whatever was driving the memory demand in Phase 2 has started releasing memory faster than it is consuming it, shrinking the footprint it had accumulated during the previous phase.\n\nWhat is this **mystery memory consumer** that bloats its demand during Phase 2 and frees it during Phase 3?\n\n## The mystery revealed\n\n[Modeling the phase transitions](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982651298) gave us some useful constraints that a viable hypothesis must satisfy. The mystery memory consumer must:\n* quickly bloat its footprint to hundreds of megabytes on a timescale of less than 10 seconds (the duration of Phase 2), under conditions triggered by the start of an eviction burst\n* quickly release its accumulated excess on a timescale of just tens of seconds (the duration of Phase 3), under the conditions immediately following an eviction burst\n\n**The answer:** The client input/output buffers meet those constraints and are the mystery memory consumer.\n\nHere is how that hypothesis plays out:\n* During Phase 1 (healthy state), the Redis main thread’s CPU usage is already fairly high. At the start of Phase 2, when evictions begin, the eviction overhead saturates the main thread’s CPU capacity, quickly dropping response rate below the incoming request rate.\n* This throughput mismatch between arrivals and responses **is itself the amplifier** that takes over driving the eviction burst. As the size of that rate gap increases, the proportion of time spent doing evictions also increases.\n* Accumulating a backlog of requests requires memory, and that backlog continues to grow until enough clients are stalled that the arrival rate drops to match the response rate. As clients stall, the arrival rate falls, and with it the memory pressure, eviction rate, and CPU overhead begin to subside.\n* At the equilibrium point when arrival rate falls to match response rate, memory demand is satisfied and evictions stop (ending Phase 2). Without the eviction overhead, more CPU time is available to process the backlog, so response rate increases above request arrival rate. This recovery phase steadily consumes the request backlog, incrementally freeing memory as it goes (Phase 3).\n* Once the backlog is resolved, the arrival and response rates match again. CPU usage is back to its Phase 1 norm, and memory usage has temporarily dropped in proportion to the max size of Phase 2’s request backlog.\n\nWe confirmed this hypothesis via a [latency injection experiment](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_987049036) showing that queuing alone explains the pathology. 
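\n\nA quick back-of-the-envelope check makes the scale of that backlog plausible. The request rates below come from the `funclatency` measurements earlier; the per-request buffer footprint is an assumption for illustration only:\n\n```\narrival_rate   = 25K requests/sec            // pre-burst norm\nresponse_rate  =  5K requests/sec            // during the eviction burst\nbacklog_growth = (25K - 5K) * ~2 KB/request  // assumed buffered request footprint\n               ≈ 40 MB/sec\n```\n\nAt that pace, a Phase 2 burst lasting 6-8 seconds would accumulate a backlog on the order of the hundreds of megabytes later reclaimed in Phase 3.\n\n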
The experiment's outcome supports the conclusion that the extra memory demand originates from response rate falling below request arrival rate.\n\n## Remedies: How to avoid entering the eviction burst cycle\n\nNow that we understand the dynamics of the pathology, we can draw confident conclusions about viable solutions.\n\nRedis evictions are only self-amplifying when all of the following conditions are present:\n* **Memory saturation:** Memory usage reaches the `maxmemory` limit, causing evictions to start.\n* **CPU saturation:** The baseline CPU usage by the Redis main thread’s normal workload is close enough to a whole core that the eviction overhead pushes it to saturation. This reduces the response rate below request arrival rate, inducing self-amplification via increased memory demand for request buffering.\n* **Many active clients:** The saturation only lasts as long as request arrival rate exceeds response rate. Stalled clients no longer contribute to that arrival rate, so the saturation lasts longer and has a greater impact if Redis has many active clients still sending requests.\n\nViable remedies include:\n* Avoid memory saturation by any combination of the following to make peak memory usage less than the `maxmemory` limit:\n  * Reduce cache time to live (TTL)\n  * Increase `maxmemory` (and host memory if needed, but watch out for [`numa_balancing` CPU overhead](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1889) on hosts with multiple NUMA nodes)\n  * Adjust client behavior to avoid writing unnecessary cache entries\n  * Split the cache among multiple instances (sharding or functional partitioning, helps avoid both memory and CPU saturation)\n* Avoid CPU saturation by any combination of the following to keep peak CPU usage for the workload plus eviction overhead below 1 CPU core:\n  * Use the processor with the fastest available single-threaded performance (instructions per second)\n  * Isolate the redis-server process (particularly its main thread) from any other competing CPU-intensive processes (dedicated host, taskset, cpuset)\n  * Adjust client behavior to avoid unnecessary cache lookups or writes\n  * Split the cache among multiple instances (sharding or functional partitioning, helps avoid both memory and CPU saturation)\n  * Offload work from the Redis main thread (io-threads, lazyfree)\n  * Reduce eviction tenacity (only gives a minor benefit in our experiments)\n\nMore exotic potential remedies could include a new Redis feature. One idea is to exempt ephemeral allocations like client buffers from counting towards the `maxmemory` limit, instead applying that limit only to key storage. Alternatively, we could limit evictions to consume at most a configurable percentage of the main thread’s time, so that most of its time is still spent on request throughput rather than eviction overhead.\n\nUnfortunately, either of those features would trade one failure mode for another, reducing the risk of eviction-driven CPU saturation while increasing the risk of unbounded memory growth at the process level, which could potentially saturate the host or cgroup and lead to an OOM (out of memory) kill. 
That trade-off may not be worthwhile, and in any case it is not currently an option.\n\n## Our solution\n\nWe had already exhausted the low-hanging fruit for CPU efficiency, so we focused our attention on avoiding memory saturation.\n\nTo improve the cache’s memory efficiency, we [evaluated](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_990891708) which types of cache keys were using the most space and how much [`IDLETIME`](https://redis.io/commands/object-idletime/) they had accrued since last access. This memory usage profile identified some rarely used cache entries (which waste space), helped inform the TTL tuning by first focusing on keys with a high idle time, and highlighted some useful potential cutpoints for functionally partitioning the cache.\n\nWe [decided](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_1014582669) to concurrently pursue several cache efficiency improvements and opened an [epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/764) for it. The goal was to avoid chronic memory saturation, and the main action items were:\n* Iteratively reduce the cache’s [default TTL](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1854) from 2 weeks to 8 hours (helped a lot!)\n* Switch to [client-side caching](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_1026821730) for certain cache keys (efficiently avoids spending shared cache space on non-shared cache entries)\n* [Partition](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/762) a set of cache keys to a separate Redis instance\n\nThe TTL reduction was the simplest solution and turned out to be a big win. One of our main concerns with TTL reduction was that the additional cache misses could potentially increase workload on other parts of the infrastructure. Some cache misses are more expensive than others, and our metrics are not granular enough to quantify the cost of cache misses per type of cache entry. This concern is why we applied the TTL adjustment incrementally and monitored for SLO violations. Fortunately, our inference was correct: Reducing TTL did not significantly reduce the cache hit rate, and the additional cache misses did not cause noticeable impact to downstream subsystems.\n\nThe TTL reduction turned out to be sufficient to drop memory usage consistently a little below its saturation point.\n\nIncreasing `maxmemory` had initially not been feasible because the original peak memory demand (prior to the efficiency improvements) was expected to be larger than the max size of the VMs we use for Redis. 
However, once we dropped memory demand below saturation, we could confidently [provision headroom](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1868) for future growth and re-enable [saturation alerting](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1883).\n\n## Results\n\nThe following graph shows Redis memory usage transitioning out of its chronically saturated state, with annotations describing the milestones when latency spikes ended and when the saturation margin became wide enough to be considered safe:\n\n![Redis memory usage stops showing a flat top saturation](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/07_epic_results__memory_saturation_avoided_by_TTL_reductions.png)\n\nZooming into the days when we rolled out the TTL adjustments, we can see the harmful eviction-driven latency spikes vanish as we drop memory usage below its saturation point, exactly as predicted:\n\n![Redis memory usage starts as a flat line and then falls below that saturation line](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/08_results__redis_memory_usage_stops_saturating.png)\n\n![Redis response time spikes stop occurring at the exact point when memory stops being saturated](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/09_results__redis_latency_spikes_end.png)\n\nThese eviction-driven latency spikes had been the biggest cause of slowness in the Redis cache.\n\nSolving this source of slowness significantly improved the user experience. This 1-year lookback shows only the long-tail portion of the improvement, not even the full benefit. Each weekday had roughly 2 million Redis requests slower than 1 second, until our fix in mid-August:\n\n![Graph of the daily count of Redis cache requests slower than 1 second, showing roughly 2 million slow requests per day on weekdays until mid-August, when the TTL adjustments were applied](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/10_results__1_year_retrospective_of_slow_redis_requests_per_day.png)\n\n## Conclusions\n\nWe solved a long-standing latency problem that had been worsening as the workload grew, and we learned a lot along the way. This article focuses mostly on the Redis discoveries, since those are general behaviors that some of you may encounter in your travels. We also developed some novel tools and analytical methods and uncovered several useful environment-specific facts about our workload, infrastructure, and observability, leading to several additional improvements and proposals not mentioned above.\n\nOverall, we made several efficiency improvements and broke the cycle that was driving the pathology. Memory demand now stays well below the saturation point, eliminating the latency spikes that were burning error budgets for the development teams and causing intermittent slowness for users. All stakeholders are happy, and we came away with deeper domain knowledge and sharper skills!\n\n## Key insights summary\n\nThe following notes summarize what we learned about Redis eviction behavior (current as of version 6.2):\n* The same memory budget (`maxmemory`) is shared by key storage and client connection buffers. 
A spike in demand for client connection buffers counts towards the `maxmemory` limit, in the same way that a spike in key inserts or key size would.\n* Redis performs evictions in the foreground on its main thread. All time spent in `performEvictions` is time not spent handling client requests. Consequently, during an eviction burst, Redis has a lower throughput ceiling.\n* If eviction overhead saturates the main thread’s CPU, then response rate falls below request arrival rate. Redis accumulates a request backlog (which consumes memory), and clients experience this as slowness.\n* The memory used for pending requests requires more evictions, driving the eviction burst until enough clients are stalled that arrival rate falls back below response rate. At that equilibrium point, evictions stop, eviction overhead vanishes, Redis rapidly handles its request backlog, and that backlog’s memory gets freed.\n* Triggering this cycle requires all of the following:\n  * Redis is configured with a `maxmemory` limit, and its memory demand exceeds that size. This memory saturation causes evictions to begin.\n  * Redis main thread’s CPU utilization is high enough under its normal workload that having to also perform evictions drives it to CPU saturation. This reduces response rate below request rate, causing a growing request backlog and high latency.\n  * Many active clients are connected. The duration of the eviction burst and the amount of memory spent on client connection buffers increase proportionally with the number of active clients.\n* Prevent this cycle by avoiding either memory or CPU saturation. In our case, avoiding memory saturation was easier (mainly by reducing cache TTL).\n\n## Further reading\n\nThe following lists summarize the analytical tools and methods cited in this article. These tools are all highly versatile and any of them can provide a massive level-up when working on performance engineering problems.\n\nTools:\n* [perf](https://www.brendangregg.com/perf.html) - A Linux performance analysis multitool. In this article, we used `perf` as a sampling profiler, capturing periodic stack traces of the `redis-server` process's main thread when it is actively running on a CPU.\n* [Flamescope](https://github.com/Netflix/flamescope) - A visualization tool for rendering a `perf` profile (and other formats) into an interactive subsecond heat map. This tool invites the user to explore the timeline for microbursts of activity or inactivity and render flamegraphs of those interesting timespans to explore what code paths were active.\n* [BCC](https://github.com/iovisor/bcc) - BCC is a framework for building BPF tools, and it ships with many useful tools out of the box. In this article, we used `funclatency` to measure the call durations of a specific Redis function and render the results as a histogram.\n* [bpftrace](https://github.com/iovisor/bpftrace) - Another BPF framework, ideal for answering ad-hoc questions about your system's behavior. It uses an `awk`-like syntax and is [quick to learn](https://github.com/iovisor/bpftrace#readme). In this article, we wrote a [custom `bpftrace` script](https://gitlab.com/gitlab-com/gl-infra/scalability/uploads/cab2cd03231f8dd4819f77b44d768cb9/redis_snoop.getMaxmemoryState.sha_25a228b839a93a1395907a03f83e1eee448b0f14.production_thresholds.bt) for observing the variables used in computing how much memory to free during each round of evictions. 
This script's instrumentation points are specific to our particular build of `redis-server`, but the [approach can be generalized](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982498636) and illustrates how versatile this tool can be.\n\nUsage examples:\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_854745083) - Walkthrough of using `perf` and `flamescope` to capture, filter, and visualize the stack sampling CPU profiles of the Redis main thread.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_857869826) - Walkthrough (including safety check) of using `funclatency` to measure the durations of the frequent calls to function `performEvictions`.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7172#note_971197943) - Experiment for adjusting Redis settings `lazyfree-lazy-eviction` and `maxmemory-eviction-tenacity` and observing the results using `perf`, `funclatency`, `funcslower`, and the Redis metrics for eviction count and memory usage.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982498636) - This is a working example (script included) of using `bpftrace` to observe the values of a function's variables. In this case we inspected the `mem_tofree` calculation at the start of `performEvictions`. Also, these [companion notes](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982499538) discuss some build-specific considerations.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_987049036) - Describes the latency injection experiment (the first of the three ideas). This experiment confirmed that memory demand increases at the predicted rate when we slow response rate below request arrival rate, in the same way evictions do. 
This result confirmed that the request queuing itself is the source of the memory pressure that amplifies the eviction burst once it begins.\n",[704,726,894],{"slug":2175,"featured":6,"template":678},"how-we-diagnosed-and-resolved-redis-latency-spikes","content:en-us:blog:how-we-diagnosed-and-resolved-redis-latency-spikes.yml","How We Diagnosed And Resolved Redis Latency Spikes","en-us/blog/how-we-diagnosed-and-resolved-redis-latency-spikes.yml","en-us/blog/how-we-diagnosed-and-resolved-redis-latency-spikes",{"_path":2181,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2182,"content":2188,"config":2194,"_id":2196,"_type":16,"title":2197,"_source":17,"_file":2198,"_stem":2199,"_extension":20},"/en-us/blog/deploy-remix-with-gitlab-and-cloudflare",{"title":2183,"description":2184,"ogTitle":2183,"ogDescription":2184,"noIndex":6,"ogImage":2185,"ogUrl":2186,"ogSiteName":692,"ogType":693,"canonicalUrls":2186,"schema":2187},"How to publish a Remix app to the edge with GitLab and Cloudflare","Learn how to deploy a Remix app with GitLab and Cloudflare Workers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682517/Blog/Hero%20Images/ryoji-hayasaka-0UZj73PQVew-unsplash.jpg","https://about.gitlab.com/blog/deploy-remix-with-gitlab-and-cloudflare","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to publish a Remix app to the edge with GitLab and Cloudflare\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Janis Altherr\"}],\n        \"datePublished\": \"2022-11-21\",\n      }",{"title":2183,"description":2184,"authors":2189,"heroImage":2185,"date":2191,"body":2192,"category":14,"tags":2193},[2190],"Janis Altherr","2022-11-21","\n\n[Remix](https://remix.run) has had a significant impact in the frontend space. \nFor eons, backend frameworks provided frontend options that frontend \nengineers more or less hated, and then frontend frameworks required a \nseparate, hard-to-maintain API for even the simplest tasks. Now, suddenly, \nthere are frontend frameworks that you can write backend code with.\n\nThis is ideal as an application scales: Remix offers the comfort of writing \nserver-side code, but should the business logic start to exceed the \ncapabilities of Remix, it's easy to move code to an API on a per-request basis. \nThis comes without the need to rewrite the entire application logic, while \nstill retaining server-side rendering or even pre-rendering capabilities!\n\nThe most performant way to deploy a Remix app is to the edge. This means \nthat small instances of your Remix app are run on a server close to the requesting\nuser. An edge network consists of hundreds of \nservers all over the world, so you can be sure the network latency for the \nuser stays low.\n\nCurrently the most popular edge service capable of running Remix apps is \nCloudflare Workers. Not only does Cloudflare offer a generous free tier, \nWorkers are also extremely easy to deploy using GitLab CI/CD. \nHere's how to create a Remix app and then deploy it to Cloudflare Workers.\n\n## Create your Remix app\n\nCreate your Remix app locally using:\n\n```bash\nnpx create-remix@latest \u003Cmy-app-name>\n```\n\nThe CLI will now guide you through a series of questions. Some of those you \nmay answer as you prefer, but answer the following questions as indicated \nbelow:\n\n```text\n? What type of app do you want to create? \n> choose \"Just the Basics\"\n\n? 
Where do you want to deploy? [...]\n> choose \"Cloudflare Workers\"\n\n? Do you want me to run `npm install`?\n> answer \"Yes\"\n```\n\nInitialize the repository and add the first commit:\n\n```shell\ngit init\ngit add .\ngit commit -m \"initial commit\"\n```\n\n## Create the project in GitLab\n\nYou can't push the code yet, because the remote repository hasn't been set up.\nVisit GitLab and create a new project. When asked, select \"Create blank \nproject.\"\n\nIn the project setup dialog, select `Edge Computing` as the `Deployment \ntarget`. Choose whichever visibility level you like; this only affects your \nsource code visibility.\n\nMake sure you clear the checkbox next to **Initialize repository with a \nREADME**, otherwise GitLab will begin a new Git history that you will have to reconcile\nwith your existing local one.\n\nOnce the project is set up, follow the instructions on how to add an \nexisting repository – if you've followed the above instructions to the letter \nyou don't have an existing remote yet, so you can run this simplified set of \ncommands:\n\n```shell\ngit remote add origin \u003Cgit-project-url>\ngit push -u origin main\n```\n\n## Configure Cloudflare\n\nNow set up your Cloudflare account to enable deployments from GitLab. \n[Log in](https://dash.cloudflare.com/login) or [create an account](https://dash.cloudflare.com/sign-up).\n\n### Subscribe to a Workers plan\n\nIf you are creating a Worker for the first time, you will have to sign up for a Workers plan in Cloudflare.\n\nIn the Cloudflare dashboard's left sidebar, click the entry **Workers**. Let \nCloudflare guide you through the setup.\n\n![Screenshot: Signing up for Workers in Cloudflare](https://about.gitlab.com/images/blogimages/remix-cloudflare/workers_onboarding.png)\n\nOnce you're back to the Workers overview page, continue below.\n\n### Obtain an API token\n\nTo deploy your Cloudflare Worker from a GitLab pipeline, you will need\nan API token. To create one, log in to the Cloudflare dashboard, then open the [API \ntokens page](https://dash.cloudflare.com/profile/api-tokens) (or find it \nmanually via the **user icon** > **My Profile** > **API Tokens**).\n\nClick **Create Token**. Find **Edit Cloudflare Workers** and click **Use \ntemplate**.\n\n![Screenshot: Select API Token template \"Edit Cloudflare Workers\"](https://about.gitlab.com/images/blogimages/remix-cloudflare/api_token_template_selection.png)\n\nUnder **Account Resources** choose *Include* and your account name.\n\nUnder **Zone Resources** choose *Include*, *Specific Zone* and your site's \ndomain. If you haven't set up a domain, you can use a less specific rule \nsuch as *All zones from an account*, although we don't recommend doing this; the API token could potentially be used beyond its scope if you add more zones to your Cloudflare account later.\n\n![Screenshot: API Token Account and Zone Settings](https://about.gitlab.com/images/blogimages/remix-cloudflare/api_token_rules.png)\n\n**Note:** If you have more than one account associated with the API token used \nduring deployment, you will have to update your project's `wrangler.toml` file\nto use the correct account. [Read more in the Cloudflare documentation](https://developers.cloudflare.com/workers/wrangler/ci-cd/#account-id).\n\nOnce you're done setting up the API token, click **Continue to summary**, \nand verify your selections. 
It should look like this:\n\n![Screenshot: API Token Summary View](https://about.gitlab.com/images/blogimages/remix-cloudflare/api_token_summary.png)\n\nIf you're happy, click **Create Token**. Cloudflare will then show you the new \ntoken. \n\nCopy the token and save it in GitLab: Open your project in GitLab, then \nvisit **Settings** > **CI/CD**. Find **Variables** and click **Expand**. Click \n**Add Variable**.\n\nIn the **Key** field, enter `CLOUDFLARE_API_TOKEN`.\nIn the **Value** field, paste the API token from Cloudflare.\n\nNow make sure your token isn't leaked in any logs: Check both **Protect** \nand **Mask**. When done, click **Add Variable**.\n\n![Adding a Variable in GitLab](https://about.gitlab.com/images/blogimages/remix-cloudflare/adding_cf_api_token_as_variable.gif)\n\n## Create the deployment pipeline\n\nThe last step is to create a GitLab pipeline. In your local repository root \nfolder, create a file named `.gitlab-ci.yml` and add the following content:\n\n```yaml\nstages:\n- deploy\n\ndeploy-worker:\n    image: node:lts\n    stage: deploy\n    environment: production\n    before_script:\n      # install dependencies\n      - npm ci\n    script:\n      - npm run deploy\n    rules:\n      # This rule triggers this job after any push to the default branch\n      - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n```\n\nTo learn more about how to configure your GitLab pipeline, read our \n[documentation](https://docs.gitlab.com/ee/ci/).\n\nNow add the file to the repository:\n\n```shell\ngit add .gitlab-ci.yml\ngit commit -m \"Add Deployment Pipeline\"\ngit push\n```\n\nThis last push will immediately run this pipeline. To monitor the pipeline \nprogress, open GitLab. In the left sidebar, find **CI/CD** > **Pipelines**. \nOnce the pipeline is marked as _passed_, your Remix site is live!\n\nIf you've used the create-app instructions from this blog post, your app should \nhave been configured to use the app name as the Worker's name. Check the \n`name` setting in your project's `wrangler.toml`.\n\nGo to `https://\u003Cworker-name>.\u003Ccloudflare-account-name>.workers.dev` to see \nyour Remix site in action. Congratulations!\n\nIn your Cloudflare dashboard, you can monitor your new app by selecting \n**Workers** from the left sidebar and then clicking on the Worker with the \nname of your app.\n\nFrom now on, every push to your repository's default branch will automatically \nbe built and deployed to Cloudflare. \n\n### Use a custom domain for your app\n\nIf you want to use your own domain, set up your website as a resource now.\n\nIn the left sidebar, click on **Websites**. In the main window, find and click\nthe **Add Site** button.\n\n![Screenshot: Add a new site in Cloudflare](https://about.gitlab.com/images/blogimages/remix-cloudflare/add_site.png)\n\nNow enter your site's domain. Select a plan that suits your needs.\nFollow the DNS setup instructions provided on the following pages.\n\nOnce you have set up your domain as a website in Cloudflare, go to the \nwebsite settings. (In the left sidebar click **Websites**, then select your \nsite).\n\n![Screenshot: Find your website on the Cloudflare Dashboard](https://about.gitlab.com/images/blogimages/remix-cloudflare/add_route_step_1.png)\n\nThe left sidebar now shows the detail navigation for the selected website. \nClick **Workers Routes**, then click **Add Route**. 
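\n\nIf you prefer configuration as code over dashboard clicks, routes can also be declared in your project's `wrangler.toml`. A minimal sketch (assuming wrangler v2-style syntax and a placeholder domain - adapt both to your setup):\n\n```toml\n# wrangler.toml - declarative alternative to the dashboard route setup\nname = \"my-app-name\"      # must match your Worker's name\nroute = \"my-site.com/*\"   # requests matching this pattern are routed to the Worker\n```\n\nThe dashboard flow for the same step continues below. 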
\n\n![Screenshot: Add a new route to your Site](https://about.gitlab.com/images/blogimages/remix-cloudflare/add_route_step_2.png)\n\nIn the Add Route modal, you can add a dynamic pattern to let Cloudflare know which requests to route to your Worker. \nFor Remix apps that's usually all of them, so if your site's domain is \n`my-site.com`, use `my-site.com/*`. You can also redirect all subdomain \nrequests to the Worker by using `*.my-site.com/*` (this is useful if you \nwould like to also serve your site at `www.my-site.com`).\n\nUnder **Service**, select your newly created Worker.\nUnder **Environment**, select **production**.\n\nClick **Save**.\n\n![Screenshot: Add route modal](https://about.gitlab.com/images/blogimages/remix-cloudflare/add_route_step_3.png)\n\nOnce the DNS servers are updated, your Remix site should be accessible at \nyour custom domain.\n\n\n## Read More\n\n- [Learn more about Cloudflare Workers](https://developers.cloudflare.com/workers/wrangler/configuration/)\n- [Check out the Remix docs](https://remix.run/docs/en/v1)\n- [Learn about GitLab pipelines](https://docs.gitlab.com/ee/ci/)\n",[726,232,675],{"slug":2195,"featured":6,"template":678},"deploy-remix-with-gitlab-and-cloudflare","content:en-us:blog:deploy-remix-with-gitlab-and-cloudflare.yml","Deploy Remix With Gitlab And Cloudflare","en-us/blog/deploy-remix-with-gitlab-and-cloudflare.yml","en-us/blog/deploy-remix-with-gitlab-and-cloudflare",{"_path":2201,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2202,"content":2208,"config":2214,"_id":2216,"_type":16,"title":2217,"_source":17,"_file":2218,"_stem":2219,"_extension":20},"/en-us/blog/environment-friction-cycle",{"title":2203,"description":2204,"ogTitle":2203,"ogDescription":2204,"noIndex":6,"ogImage":2205,"ogUrl":2206,"ogSiteName":692,"ogType":693,"canonicalUrls":2206,"schema":2207},"How GitLab eliminates value stream friction in dev environments","It is important to have the complete picture of scaled effects in view when designing automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682507/Blog/Hero%20Images/sandeep-singh-3KbACriapqQ-unsplash.jpg","https://about.gitlab.com/blog/environment-friction-cycle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-11-17\",\n      }",{"title":2209,"description":2204,"authors":2210,"heroImage":2205,"date":2211,"body":2212,"category":14,"tags":2213},"How GitLab can eliminate the massive value stream friction of developer environment provisioning and cleanup",[1701],"2022-11-17","\n\nA strong DevOps value stream drives developer empowerment as far left as possible. In GitLab, this is embodied in per-feature branch merge requests that are rich with automated code quality and defect information - including not only findings but also automated remediation capabilities and collaboration. Some defects and code quality issues can only be found by analyzing a running copy of the application, using techniques such as DAST, IAST, and fuzzing. GitLab has built a fully automated, seamless developer environment lifecycle management approach right into the developer experience. In fact, it’s so seamlessly built-in, it can be easy to overlook how critical developer environment lifecycle management is. 
This article will highlight why and how GitLab adds value using developer environment automation. In addition, while GitLab provides out-of-the-box developer environment lifecycle management for Kubernetes, this article demonstrates an approach and a working example of how to extend that capability to other common cloud-based application framework PaaS offerings.\n\n## Provisioning of development environments is generally a negative feedback loop\n\nIn a prior job, I worked on a DevOps transformation team that supported multiple massive shared development environments in AWS. They were accessible to more than 4,000 developers working to build more than 100 SaaS applications and utility stacks. In the journey to the AWS Cloud, each development team took ownership of the automation required to deploy their applications. Since developers were able to self-service, over time this solved the problem of development friction generated by waiting for environments to be provisioned for testing, feature experiments, integration experiments, etc. \n\nHowever, the other half of the problem then ballooned - environment sprawl - with an untold number of environments idling without management and without knowledge of when they could be torn down. Over time the development environment cost became a significant multiple of production costs. The cloud has solved problems with environment provisioning bottlenecks due to hardware acquisition and provisioning, but this can also inadvertently fuel the high costs of unmanaged sprawl. This problem understandably causes organizations to raise administrative barriers to new development environments.\n\nIn many organizations this becomes a vicious cycle - most especially if developer environments are operated by a different team, or worse, on an independent budget. Environment justification friction usually follows quickly once an organization discovers the true cost of its currently running environments. Developers then have to justify the need for new environment requests and they have to make the gravest of promises to disband the environment as soon as they are done. Another friction arises when a separate group is tasked with cost controls and environment provisioning and cleanup. This introduces friction in the form of administrative and work queueing delays. Coordination friction also crops up because an accurate understanding of exactly what is needed for an environment can be challenging to convey. When mistakes are made or key information is missing, developers must go back and forth on support requests to get the configuration completely correct.\n\n## Partial automation can worsen the problem\n\nThat’s the first half of the environment lifecycle, but as I mentioned, even if that is fully automated and under the control of developers, the other half of the feedback loop comes into play. When a given development environment has fulfilled its initial justification, the team does not want to destroy it because environments are so hard to justify and create. Then the sprawl starts and, of course, the barriers to new environments are raised even higher. This is a classic negative feedback loop.\n\nSystems theory shows us that sometimes there are just a few key factors in stopping or even reversing a negative feedback loop. 
Let’s take this specific problem apart and talk about how GitLab solves for it.\n\n## Treat developer environments as a complete lifecycle\n\nIn the prior example it is evident that by leaving out the last stage of the environment lifecycle - retirement or tear down - we still end up with a negative feedback loop. Removing provisioning friction actually makes the problem worse if retirement friction is not also addressed at the same time. Solutions to this problem need to address the entire lifecycle to avoid impacting value stream velocity. Neglecting or avoiding the retirement stage of a lifecycle is a common problem across all types of systems. In contrast, by addressing the entire lifecycle we can transform it from being a negative feedback loop to a managed lifecycle.\n\n## The problems of who and when\n\nBuried inside the insidious friction loop are a couple of key coordination problems we’ll call “Who and When.” Basically, \"Who\" should create environments and \"When\" should they be created to ensure reasonable cost optimization? Then again, _Who_ should clean up environments and _When_ do you know with certainty that the environment is no longer needed? Even with highly collaborative teams working hard together for maximum business value, these questions present a difficulty that frequently results in environments running for a long time before they are used and after they are no longer needed. The knowledge of appropriate timing plays a critical role in gaining control over this source of friction.\n\n## The problem of non-immutable development environments\n\nFriction in environment lifecycle management creates a substantial knock-on problem associated with long-lived environments. Long-lived environments that are updated multiple times for various independent projects start to accumulate configuration rot; they become snowflakes with small changes that are left over from non-implemented experiments, software or configuration removals, and other irrelevant bits and pieces. Immutability is the practice of not doing “in place” updates to a computing element, but rather destroying it and replacing it with a fresh, built-from-scratch element. Docker has made this concept widely accepted and effective in production workloads, but development environments frequently do not have this attribute because they were automated without the design constraint of immutability, so they are updated in-place for reuse by various initiatives. If the environment lifecycle is not fully automated, it is impossible to make environments workable on a per-feature branch basis.\n\n## The problem of non-isolated development environments \n\nWhen environments are manually provisioned or when there is a lot of cost or administrative friction to setting them up, environment sharing becomes more commonplace. This creates sharing contention at many levels. Waiting for an environment to become available, pressure to complete work quickly so others can use the environment, and restrictions on the types of changes that can be made to shared environments are just some of the common sharing contention elements that arise. If environments can be isolated, then sharing contention friction evaporates. 
Pushing this to the extreme of a per-feature branch granularity brings many benefits, but is also difficult.\n\n## Effect on the development value stream\n\nThe effect that a friction-filled environment lifecycle has on the value stream can be immense - how many stories have you heard of projects waylaid for weeks or months while waiting on environment provisioning? What about defects shipped to production because a shared environment had left-over configuration during testing? Frequently this friction is tolerated in the value stream because no one will argue that unlimited environment sprawl is a wise use of company resources. We all turn off the lights in our home when we are no longer using a room, and it is good business sense and good stewardship not to leave idle resources running at work.\n\nThe concept of good stewardship of planetary resources is actually becoming an architectural level priority in the technology sector. This is evidenced in AWS’ [introduction of the “Sustainability” pillar to the AWS Well-Architected principles in 2021](https://aws.amazon.com/blogs/aws/sustainability-pillar-well-architected-framework/) and many other green initiatives in the technology sector.\n\nIt’s imperative that efforts to improve the development value stream consider whether developer environment management friction is hampering the breadth, depth and velocity of product management and software development.\n\n## Seamless and fully automated review environment lifecycle management\n\nWhat if this negative feedback loop could be stopped? What if new environments were seamless and automatically created right at the moment they were needed? What if developers were completely happy to immediately tear down an environment when they were done because it takes no justification or effort on their part to create a new one at will?\n\nEnter GitLab Review Environments!\n\nGitLab review apps are created by the developer action of creating a new branch. No humans are involved, as the environment is deployed while the developer is still mulling over their first code changes on their branch.\n\nAs the developer pushes code updates, the review app is automatically updated with the changes, and all quality checks and security scans are run so the developer learns whether they introduced a vulnerability or quality defect. This happens within the shortest possible amount of time after the defect was introduced.\n\nWhen the developer merges their code, the review app is automatically torn down.\n\nThis seamless approach to developer environment provisioning and cleanup addresses enough of the critical factors in the negative feedback loop that it is effectively nullified.\n\nConsider:\n\n- Developer environment provisioning and cleanup are fully automated, transparent, developer-initiated activities. 
They consume no people or human process resources, which are always far slower and more expensive than technology solutions.\n- Provisioning and cleanup timing are exactly synchronized with the developer’s need, preventing inefficiencies in idle time before or after environment usage.\n- They are immutable on a new branch basis - a new branch always creates a new environment from a fresh copy of the latest code.\n- They are isolated - no sharing contention and no mixing of varying configuration.\n- They treat developer environments as a lifecycle.\n\nIt is so transparent that some developers may not even realize that their feature branch has an isolated environment associated with it.\n\n## Hard dollar costs are important and opportunity costs are paramount\n\nGitLab environments positively contribute to the value stream in two critical ways. First, the actual waste of idle machines is dramatically reduced. However, more importantly, all the human processes that end up being applied to managing that waste also disappear. Idle machines running in the cloud are only lost money. Inefficient use of people’s time carries a high dollar cost but it also carries a higher opportunity cost. There are so many value-generating activities people can do when their time is unencumbered by cost-control administration.\n\n## Multiplying the value stream contributions of developer review environments\n\nDeveloper environment friction is an industry-wide challenge and GitLab nearly eliminates the core problems of this feedback cycle. However, GitLab has also gone way beyond simply addressing this problem by creating a lot of additional value through seamless per-feature branch developer environments.\n\nHere is a visualization of where dynamic review environments plug into the overall GitLab developer workflow.\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/gitlabenvironmentlifecycle.png)\n\n**Figure 1: Review environments with AWS Cloud Services**\n\nFigure 1 shows GitLab’s full development cycle support with a little art of the possible thrown in around interfacing with AWS deployment services. The green dashed arrow indicates that GitLab deploys a review environment when the branch is first created. Since the green arrow is part of the developer's iteration loop, it also depicts that review app updates are done on each code push. \n\nThe light purple box shows that the iterative development and CI checks are all within the context of a merge request (MR), which provides a Single Pane of Glass (SPOG) for all quality checks, vulnerabilities and collaboration. Finally, when the merge is done, the review environment is cleaned up. The feature branch merge request is the furthest left that visibility and remediation can be shifted. GitLab’s shifting of this into the developer feature branch is what gives developers a semi-private opportunity to fix any quality or security findings with the specific code they have added or updated.\n\nOne other thing to note here is that when GitLab CD code is engineered to handle review environments, it is reused for all other preproduction and production environments. The set of AWS icons after the “Release” icon would be using the same deployment code. However, if the GitLab CD code is engineered only around deploying to a set of static environments, it is not automatically capable of review environments. 
Review environment support is a superset of static environment support.\n\n## Review environments enable a profound shift left of visibility and remediation\n\nAt GitLab “shift left” is not just about “problem visibility” but also about “full developer enablement to resolve problems” while in-context. GitLab merge requests provide critical elements that encourage developers to get into a habit of defect remediation:\n\n- **Context** - Defect and vulnerability reporting is only for code the developer changed in their branch and is tracked by the merge request (MR) for that branch.\n- **Responsibility** - Since MRs and branches are associated with an individual, it is evident to the developer (and the whole team) what defects were introduced or discovered by which developers.\n- **Timing** - Developers become aware of defects nearly as soon as they are introduced, not weeks or months after having integrated with other code. If they were working on a physical product, we can envision that all the parts are still on the assembly bench.\n- **Visibility - Appropriately Local, Then Appropriately Global** - Visibility of defects is context specific. While a developer has an open MR that is still a work in progress, they can be left alone to remedy accidentally-introduced defects with little concern from others because the visibility is local to the MR. However, once they seek approvals to merge their code, then the approval process for the MR will cause the visibility of any unresolved defects and vulnerabilities to come to the attention of everyone involved in the approval process. This ensures that oversight happens with just the right timing - not too early and not forgotten. This makes a large-scale contribution to human efficiency in the development value stream.\n- **Advisement** - As much as possible GitLab integrates tools and advice right into the feature branch MR context where the defects are visible. Developers are given full vulnerability details and can take just-in-time training on specific vulnerabilities. \n- **Automated Remediation** - Developers can choose to apply auto-remediations when they are available.\n- **Collaboration** - They can use MR comments and new issues to collaborate with teammates throughout the organization on resolving defects of all types.\n\nHaving seamless, effortless review environments at a per-feature branch granularity is a critical ingredient in GitLab’s ability to maximize the shift left of the above developer capabilities. This is most critical in the developer checks that require a running copy of the application, which is provided by the review environments. These checks include things such as DAST, IAST, API fuzzing and accessibility testing. The industry is also continuing to multiply the types of defect scanners that require an actively running copy of the application.\n\n## Extending GitLab review environments to other cloud application framework PaaS\n\nSo you may be thinking, “I love GitLab review environments, but not all of our applications are targeting Kubernetes.” It is true that the out-of-the-box showcasing of GitLab review environments depends on Kubernetes. One of the key reasons for this is that Kubernetes provides an integrated declarative deployment capability known as deployment manifests. The environment isolation mechanism, known as namespaces, also provides a critical capability. 
GitLab wires these Kubernetes capabilities up to a few key pieces of GitLab CD to accomplish the magic of isolated, per-feature branch review environments.\n\nAs far as I know there is no formal or de facto industry term for what I’ll call “Cloud Application Framework PaaS.” Cloud-provided PaaS can be targeted at various “levels” of the problem of building applications. For instance, primitive components such as AWS ELB address the problem of application load balancing by providing a variety of virtual, cloud-scaling and secured appliances that you can use as a component of building an application. Another example is [AWS Cognito](https://aws.amazon.com/cognito/) to help with providing user login and profile services to an application build.\n\nHowever, there are also cloud PaaS offerings that seek to solve the entire problem of rapid application building and maintenance. These are services like AWS Amplify and AWS AppRunner. These services frequently knit together primitive PaaS components (such as described above) into a composite that attempts to accelerate the entire process of building applications. Frequently these PaaS also include special CLIs or other developer tools that attempt to abstract the creation, maintenance and deployment of an Infrastructure as Code layer. They also tend to be [GitOps](/topics/gitops/)-oriented by storing this IaC in the same repository as the application code, which enables full control over deployments via Git controls such as branches and merge requests.\n\nThis approach relieves developers of early-stage applications from having to learn IaC or hire IaC operations professionals too early. Basically, it avoids prematurely optimizing for onboarding IaC skills. If the application is indeed successful it is quite common to outgrow the integrated IaC support provided by these specialized PaaS; however, the evolution is very natural because the managed IaC can simply start to be developed by specialists.\n\nThe distinction of cloud application framework PaaS is important when understanding where GitLab can create compound value with Dynamic Review Environments. I will keep referring to this kind of PaaS, the kind that tries to solve the entire “Building Applications Problem,” as “Cloud Application Framework PaaS.”\n\nSo we have a set of GitLab interfaces and conventions for implementing seamless developer review environments, we have non-Kubernetes cloud application infrastructures that provide declarative deployment interfaces, and we can indeed make them work together! Interestingly, it is all done in GitLab CI YAML, which means that once you see the art of the possible, you can start implementing dynamic review environment lifecycle management for many custom environment types with the existing GitLab features. \n\n## A working, non-Kubernetes example of dynamic review environments in action\n\n![](https://about.gitlab.com/images/blogimages/environment-friction-lifecycle/CloudFormationDeployAnimatedGif.gif)\n\n**Figure 2: Working CD example of review environments for AWS CloudFormation**\n\nFigure 2 shows the details of an actual non-Kubernetes working example called CloudFormation AutoDeploy With Dynamic Review Environments. This project enables any AWS CloudFormation template to be deployed. It specifically supports an isolated stack deployment whenever a review branch is created and then also destroys that environment when the branch is merged. 
\n\nHere are some of the key design constraints and best practices that allow it to support automated review environments:\n\n- **The code is implemented as an include.** Notice that the main [.gitlab-ci.yml](https://gitlab.com/guided-explorations/aws/cloudformation-deploy/-/blob/main/.gitlab-ci.yml) file has only the variables applicable to this project and then the inclusion of Deploy-AWSCloudFormation.gitlab-ci.yml. This allows you to treat the CloudFormation integration as a managed, shared include that can be improved and updated centrally. If the stress of backward compatibility of managing a shared dependency is too much, you can encourage developers to make a copy of this file to essentially version peg it with their project.\n\n- **Avoids Conflict with Auto DevOps CI Stage Names** - The [standard stages of Auto DevOps are here](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml#L70). This constraint allows the auto deploy template to be leveraged. \n\n- **Creates and Sequences Custom Stages as Necessary** - For instance, you can see we’ve added a `create-changeset` stage and jobs.\n\n- The `deploy-review` job and its `environment:` section must have a very specific construction; let’s look at the important details:\n\n  ```\n    rules:\n      - if: '$CI_COMMIT_BRANCH == \"main\"'\n        when: never\n      - if: '$REVIEW_DISABLED'\n        when: never\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS == \"true\"'\n        when: manual\n      - if: '($CI_COMMIT_TAG || $CI_COMMIT_BRANCH) && $REQUIRE_CHANGESET_APPROVALS != \"true\"'\n    artifacts:\n      reports:\n        dotenv: envurl.env\n    environment:\n      name: review/$CI_COMMIT_REF_SLUG\n      url: $DYNAMIC_ENVIRONMENT_URL\n      on_stop: stop_review\n  ```\n\n  - `rules:` are used to ensure this job only runs when we are not on the main branch. The main branch implements long-lived staging and prod environments.\n  - `artifacts:reports:dotenv` allows variables populated during a CI job to become pipeline-level variables. The most critical role this plays in this job is to allow the URL retrieved from CloudFormation Outputs to be populated into the variable DYNAMIC_ENVIRONMENT_URL. The file `envurl.env` would have at least the line `DYNAMIC_ENVIRONMENT_URL={url-from-cloudformation}` in it. You can see this in the job code as `echo \"DYNAMIC_ENVIRONMENT_URL=${STACK_ENV_URL}\" >> envurl.env`\n  - `environment:name:` is using the Auto Deploy convention of placing review apps under the top-level environment group called `review`. The reference `$CI_COMMIT_REF_SLUG` ensures that the branch (or tag) name is used, but with all illegal characters removed. By your development convention, the environment name should become a part of the IaC constructs that ensure both uniqueness as well as identifiability by this pipeline. In GitLab's standard auto deploy for Kubernetes this is done by constructing a namespace that contains the name in this provided parameter. In CloudFormation we make it part of the Stack Name. The value here is exposed in the job as the variable ${ENVIRONMENT}.\n  - `environment:url:` it is not self-evident here, but the variable DYNAMIC_ENVIRONMENT_URL was populated by the deployment job via the file `envurl.env`, so it contains the right value at this time. This causes the GitLab “Environment” page to have a clickable link to visit the environment. It is also used by DAST and other live application scan engines to find and scan the isolated environment.\n  - `environment:on_stop:` in the deploy-review job is what maps to the `stop_review` named job. This is the magic sauce behind automatic environment deletion when a feature branch is merged. `stop_review` must be written with the correct commands to accomplish the teardown; a sketch of such a job follows this list.\n\n## A reusable engineering pattern\n\nThis CloudFormation pattern serves as a higher-level pattern of how GitLab review environments can be adapted to any other cloud “Application Level PaaS.” This is a term I use to indicate a cloud PaaS that is abstracted highly enough that developers think of it as “a place to deploy applications.” Perhaps a good way to understand it is to contrast it with PaaS that does not claim to serve as an entire application platform. Cloud-based load balancers are a good example of a PaaS that performs a utility function for applications but is not a place to build an entire cloud application. \n\n## Application PaaS for abstracting IaC concerns for developers\n\nGitLab auto deploy combines well with the cloud application framework PaaS that has a disposition toward developer productivity by reducing or eliminating IaC management required by developers. AWS Amplify has such productivity support in the form of a developer-specific CLI which allows IaC to be authored and updated in the same Git repository where the application code is stored. Adding an entire scaling database PaaS is as simple as running a single CLI command.\n\nGenerally, such Application PaaS not only generate and help maintain IaC through highly abstracted CLI or UI actions, they also contain a single `deploy` command which is easily combined with a GitLab Auto Deploy template for working with that particular Application PaaS.\n\n## Wrap up\n\nHopefully this article has helped you understand that:\n\n- GitLab already contains a super valuable feature that automates developer environment lifecycle management.\n- It is critical in addressing a key friction in the DevOps value chain.\n- It can be extended beyond Kubernetes to other cloud application framework PaaS offerings.\n\n\nPhoto by [Sandeep Singh](https://unsplash.com/@funjabi?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/friction?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[894,750,771],{"slug":2215,"featured":6,"template":678},"environment-friction-cycle","content:en-us:blog:environment-friction-cycle.yml","Environment Friction Cycle","en-us/blog/environment-friction-cycle.yml","en-us/blog/environment-friction-cycle",{"_path":2221,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2222,"content":2228,"config":2233,"_id":2235,"_type":16,"title":2236,"_source":17,"_file":2237,"_stem":2238,"_extension":20},"/en-us/blog/simple-kubernetes-management-with-gitlab",{"title":2223,"description":2224,"ogTitle":2223,"ogDescription":2224,"noIndex":6,"ogImage":2225,"ogUrl":2226,"ogSiteName":692,"ogType":693,"canonicalUrls":2226,"schema":2227},"Simple Kubernetes management with GitLab","Follow our tutorial to provision a Kubernetes cluster and manage it with IAC using Terraform and Helm in 20 minutes or less.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670037/Blog/Hero%20Images/auto-deploy-google-cloud.jpg","https://about.gitlab.com/blog/simple-kubernetes-management-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
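\n\nTo make the teardown side concrete, here is a minimal sketch of what a `stop_review` job can look like for CloudFormation. This is an illustration rather than the project's exact code: the stage name, the `review-${CI_COMMIT_REF_SLUG}` stack naming convention, and the AWS CLI calls are assumptions here, so consult the working example for the real implementation.\n\n```\n# Sketch only - the real job lives in the CloudFormation deploy example project\nstop_review:\n  stage: cleanup\n  variables:\n    GIT_STRATEGY: none\n  allow_failure: true\n  rules:\n    - if: '$CI_COMMIT_BRANCH == \"main\"'\n      when: never\n    - if: '$REVIEW_DISABLED'\n      when: never\n    - if: '$CI_COMMIT_TAG || $CI_COMMIT_BRANCH'\n      when: manual\n  script:\n    # Assumed convention: the stack name embeds the environment name\n    - aws cloudformation delete-stack --stack-name \"review-${CI_COMMIT_REF_SLUG}\"\n    - aws cloudformation wait stack-delete-complete --stack-name \"review-${CI_COMMIT_REF_SLUG}\"\n  environment:\n    name: review/$CI_COMMIT_REF_SLUG\n    action: stop\n```\n\nThe `environment:action: stop` setting is what tells GitLab this job tears the environment down rather than deploying it, and `GIT_STRATEGY: none` skips a useless clone on a branch that may already be deleted.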
\"Article\",\n        \"headline\": \"Simple Kubernetes management with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2022-11-15\",\n      }",{"title":2223,"description":2224,"authors":2229,"heroImage":2225,"date":2230,"body":2231,"category":14,"tags":2232},[789],"2022-11-15","\n\nKubernetes can be very complex and has dozens of tutorials out there on how to provision and manage a cluster. This tutorial aims to provide a simple, lightweight solution to provision a Kubernetes cluster and manage it with infrastructure as code (IaC) using Terraform and Helm in 20 minutes or less.\n\n**The final product of this tutorial will be two IaC repositories with fully functional CI/CD pipelines:**\n\n1. [gitlab-terraform-k8s](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks) - A single source of truth to provision, configure, and manage your Kubernetes infrastructure using Terraform\n1. [cluster-management](https://gitlab.com/gitlab-org/project-templates/cluster-management) - A single source of truth to define the desired state of your Kubernetes cluster using the GitLab Agent for Kubernetes and Helm\n\n![Final Product](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/final-product.png){: .shadow}\n\n\n### Prerequisites\n- AWS or GCP account with permissions to provision resources\n- GitLab account \n- Access to a GitLab Runner\n- 20 minutes\n\n### An overview of this tutorial is as follows:\n\n1. Set up the GitLab Terraform Kubernetes Template 🏗️\n2. Register the GitLab Agent 🕵️\n3. Add in Cloud Credentials ☁️🔑\n4. Set up the Kubernetes Cluster Management Template 🚧\n5. Enjoy your Kubernetes Cluster completely managed in code! 👏\n\n## Set up the GitLab Terraform Kubernetes Template\n\nStart by importing the example project by URL - [https://gitlab.com/projects/new#import_project](https://gitlab.com/projects/new#import_project)\n\nTo import the project:\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Import project**.\n4. Select **Repository by URL**.\n5. For the Git repository URL:\n- [GCP Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine): https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-gke.git\n- [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/): https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks.git\n6. Complete the fields and select **Create project**.\n\n## Register the GitLab Agent\n\nWith your newly created **gitlab-terraform-k8s** repo, create a GitLab Agent for Kubernetes:\n\n1. On the left sidebar, select **Infrastructure > Kubernetes clusters**. Select **Connect a cluster (agent).**\n2. From the **Select an agent** dropdown list, select **eks-agent/gke-agent and select **Register an agent**.\n3. GitLab generates a registration token for the agent. **Securely store this secret token, as you will need it later.**\n4. GitLab provides an address for the agent server (KAS). Securely store this as you will also need it later.\n5. 
Add this to the **gitlab-terraform-eks/.gitlab/agents/eks-agent/config.yaml** in order to allow the GitLab Agent to have access to your entire group.\n\n```yaml\nci_access:\n  groups:\n    - id: your-namespace-here\n```\n\n![Register GitLab Agent](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/register-gitlab-agent.png){: .shadow}\n\n\n## Add in your Cloud Credentials to CI/CD variables\n\n### [AWS EKS](https://aws.amazon.com/eks/)\n\nOn the left sidebar, select **Settings > CI/CD. Expand Variables**.\n1. Set the variable **AWS_ACCESS_KEY_ID** to your AWS access key ID.\n2. Set the variable **AWS_SECRET_ACCESS_KEY** to your AWS secret access key.\n3. Set the variable **TF_VAR_agent_token** to the agent token displayed in the previous task.\n4. Set the variable **TF_VAR_kas_address** to the agent server address displayed in the previous task.\n\n![Add in CI/CD variables](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/cicd-variables.png){: .shadow}\n\n\n### [GCP GKE](https://cloud.google.com/kubernetes-engine)\n\n1. To authenticate GCP with GitLab, create a GCP service account with the following roles: **Compute Network Viewer, Kubernetes Engine Admin, Service Account User, and Service Account Admin**. Both User and Admin service accounts are necessary. The User role impersonates the default service account when creating the node pool. The Admin role creates a service account in the kube-system namespace.\n2. **Download the JSON file** with the service account key you created in the previous step.\n3. On your computer, encode the JSON file to base64 (replace /path/to/sa-key.json with the path to your key):\n\n```\nbase64 -i /path/to/sa-key.json | tr -d '\\n'\n```\n\n- Use the output of this command as the **BASE64_GOOGLE_CREDENTIALS** environment variable in the next step.\n\n4. On the left sidebar, select **Settings > CI/CD. Expand Variables**.\n5. Set the variable **BASE64_GOOGLE_CREDENTIALS** to the base64 encoded JSON file you just created.\n6. Set the variable **TF_VAR_gcp_project** to your GCP project name.\n7. Set the variable **TF_VAR_agent_token** to the agent token displayed in the previous task.\n8. Set the variable **TF_VAR_kas_address** to the agent server address displayed in the previous task.\n\n## Run GitLab CI to deploy your Kubernetes cluster!\n\n![Deploy Kubernetes cluster](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/pipeline-view.png){: .shadow}\n\nWhen successfully completed, view the cluster in the AWS/GCP console!\n\n![AWS EKS](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/aws-eks.png){: .shadow}\n\n### You are halfway done! 👏 Keep it up!\n\n## Set up the Kubernetes Cluster Management Project\n\nCreate a project from the cluster management project template - [https://gitlab.com/projects/new#create_from_template](https://gitlab.com/projects/new#create_from_template)\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Create from template**.\n4. From the list of templates, next to **GitLab Cluster Management**, select **Use template**.\n5. Enter the project details. Ensure this project is created in the same namespace as the gitlab-terraform-k8s project.\n6. Select **Create project**.\n7. Once the project is created, on the left sidebar select **Settings > CI/CD. Expand Variables**.\n8. Set the variable **KUBE_CONTEXT** to point to the GitLab Agent. For example, `noah-ing-demos/infrastructure/gitlab-terraform-eks:eks-agent`.\n\n![Set Kube Context](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/kube-config.png){: .shadow}\n\n\n- **Uncomment the applications you'd like to be installed** into your Kubernetes cluster in the **helmfile.yaml**. In this instance I chose ingress, cert-manager, prometheus, and Vault. 
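\n\nFor orientation, the relevant part of **helmfile.yaml** might look roughly like this after uncommenting. This is a sketch based on the template's layout, so the paths and application list in your copy may differ:\n\n```yaml\n# helmfile.yaml in the cluster management project (sketch, not verbatim)\nhelmfiles:\n  - path: applications/ingress/helmfile.yaml\n  - path: applications/cert-manager/helmfile.yaml\n  - path: applications/prometheus/helmfile.yaml\n  - path: applications/vault/helmfile.yaml\n  # - path: applications/gitlab-runner/helmfile.yaml  # left commented out\n```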
Gitlab","en-us/blog/simple-kubernetes-management-with-gitlab.yml","en-us/blog/simple-kubernetes-management-with-gitlab",{"_path":2240,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2241,"content":2247,"config":2253,"_id":2255,"_type":16,"title":2256,"_source":17,"_file":2257,"_stem":2258,"_extension":20},"/en-us/blog/rebase-in-real-life",{"title":2242,"description":2243,"ogTitle":2242,"ogDescription":2243,"noIndex":6,"ogImage":2244,"ogUrl":2245,"ogSiteName":692,"ogType":693,"canonicalUrls":2245,"schema":2246},"How to use Git rebase in real life","From fixup to autosquash here are real world ways to leverage Git rebase.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682486/Blog/Hero%20Images/rebase-in-real-life.jpg","https://about.gitlab.com/blog/rebase-in-real-life","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Git rebase in real life\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Toon Claes\"}],\n        \"datePublished\": \"2022-11-08\",\n      }",{"title":2242,"description":2243,"authors":2248,"heroImage":2244,"date":2250,"body":2251,"category":14,"tags":2252},[2249],"Toon Claes","2022-11-08","\n\nMy colleague [Chris](/company/team/#chriscool) recently wrote about [how to take advantage of Git\nrebase](/blog/take-advantage-of-git-rebase/). In this post we'll\nexplain how you can take these techniques, and apply them to daily developer life.\n\n## Fixup\n\nImagine you have created a merge request, and there are some pipeline failures\nand some comments from reviews, and suddenly your [commit history](/blog/keeping-git-commit-history-clean/) looks something\nlike this:\n\n```shell\n$ git log --oneline\n\n8f8ef5af (HEAD -> my-change) More CI fixes\ne4fb7935 Apply suggestion from reviewer\nc1a1bec6 Apply suggestion from reviewer\n673222be Make linter happy\na0c30577 Fix CI failure for X\n5ff160db Implement feature Y\nf68080e3 Implement feature X\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into 'main'\n...\n```\n\nIn this example there are 2 commits implementing feature X and Y, followed by a\nhandful of commits that aren't useful on their own. We used the fixup feature of\nGit rebase to get rid of them.\n\n### Finding the commit\n\nThe idea of this technique is to integrate the changes of these follow-up\ncommits into the commits that introduced each feature. This means for each\nfollow-up commit we need to determine which commit they belong to.\n\nBased on the filename you may already know which commits belong together, but if\nyou don't you can use git-blame to find the commit.\n\n```shell\ngit blame \u003Crevision> -L\u003Cstart>,\u003Cend> \u003Cfilename>\n```\n\nWith the option `-L` we'll specify a range of a line numbers we're interested in.\nHere `\u003Cend>` cannot be omitted, but it can be the same as `\u003Cstart>`. You can\nomit `\u003Crevision>`, but you probably shouldn't because you want to skip over the\ncommits you want to rebase away. 
\n\n### Interactive rebase\n\nThe next step is to start the interactive rebase:\n\n```shell\n$ git rebase -i main\n```\n\nHere you're presented with the list of instructions in your `$EDITOR`, with the\noldest commit at the top (the reverse of the `git log` output):\n\n```text\npick f68080e3 Implement feature X\npick 5ff160db Implement feature Y\npick a0c30577 Fix CI failure for X\npick 673222be Make linter happy\npick c1a1bec6 Apply suggestion from reviewer\npick e4fb7935 Apply suggestion from reviewer\npick 8f8ef5af More CI fixes\n```\n\nNow you'll need to change these instructions to something like this:\n\n```text\npick f68080e3 Implement feature X\nfixup a0c30577 Fix CI failure for X\nfixup c1a1bec6 Apply suggestion from reviewer\npick 5ff160db Implement feature Y\nfixup 673222be Make linter happy\nfixup e4fb7935 Apply suggestion from reviewer\nfixup 8f8ef5af More CI fixes\n```\n\nAs you can see I've reordered the commits, and I've changed some occurrences of\n`pick` to `fixup`.\n\nGit rebase processes this list top-to-bottom. It takes each line with\n`pick` and uses its commit message. On each line starting with `fixup` it\nintegrates the changes into the commit above it. When you've saved this file and\nclosed your `$EDITOR`, the Git history will look something like this:\n\n```shell\n$ git log --oneline\n\ne880c726 (HEAD -> my-change) Implement feature Y\ne088ea06 Implement feature X\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into 'main'\n...\n```\n\n## Autosquash\n\nUsing autosquash can be an alternative technique to the above. First we'll\nuncommit the follow-up commits we want to get rid of, keeping the two feature\ncommits and moving all the other changes back into the working tree:\n\n```shell\n$ git reset 5ff160db\n```\n\nNow those changes only exist in your working tree, and are gone from the commit\nhistory. You can use `git add` or `git add -p` to stage all changes related to\n`f68080e3 Implement feature X`. Instead of running `git commit` or `git commit -m`\nwe'll use the `--fixup` option:\n\n```shell\n$ git commit --fixup f68080e3\n```\n\nNow the history will look something like:\n\n```shell\n$ git log --oneline\n\ne744646b (HEAD -> my-change) fixup! Implement feature X\n5ff160db Implement feature Y\nf68080e3 Implement feature X\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into 'main'\n...\n```\n\nAll remaining changes should now belong to `5ff160db Implement feature Y` so we\ncan run:\n\n```shell\n$ git add .\n\n$ git commit --fixup 5ff160db\n\n$ git log --oneline\n\n18c0fff9 (HEAD -> my-change) fixup! Implement feature Y\ne744646b fixup! Implement feature X\n5ff160db Implement feature Y\nf68080e3 Implement feature X\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into 'main'\n...\n```\n\nYou can now review the `fixup!` commits and if you're happy with them, run:\n\n```shell\n$ git rebase -i --autosquash main\n```\n\nAs you can see, we provide the extra option `--autosquash`. This option will look for\n`fixup!` commits, automatically reorder them, and set their instruction to\n`fixup`. Normally there's nothing left for you to do, and you can just close\nthe instruction list in your editor. If you type `git log` now you'll see the\n`fixup!` commits are gone.
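\n\nIf you find yourself using this workflow a lot, you can make every interactive\nrebase behave this way by default via Git configuration:\n\n```shell\n# Apply --autosquash automatically on every interactive rebase\n$ git config --global rebase.autoSquash true\n```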
\n\n## Alternatives\n\nFinally, there are some tools that allow you to _absorb_ commits more easily, for\nexample:\n\n* [lib.rs/crates/git-absorb](https://lib.rs/crates/git-absorb)\n* [github.com/MrFlynn/git-absorb](https://github.com/MrFlynn/git-absorb)\n* [gitlab.com/bertoldia/git-absorb](https://gitlab.com/bertoldia/git-absorb)\n* [github.com/tummychow/git-absorb](https://github.com/tummychow/git-absorb)\n* [github.com/torbiak/git-autofixup](https://github.com/torbiak/git-autofixup)\n\n[Cover image](https://unsplash.com/photos/qAShc5SV83M) by [Yung Chang](https://unsplash.com/@yungnoma) on [Unsplash](https://unsplash.com/).\n{: .note}\n",[702,727,726],{"slug":2254,"featured":6,"template":678},"rebase-in-real-life","content:en-us:blog:rebase-in-real-life.yml","Rebase In Real Life","en-us/blog/rebase-in-real-life.yml","en-us/blog/rebase-in-real-life",{"_path":2260,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2261,"content":2267,"config":2273,"_id":2275,"_type":16,"title":2276,"_source":17,"_file":2277,"_stem":2278,"_extension":20},"/en-us/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers",{"title":2262,"description":2263,"ogTitle":2262,"ogDescription":2263,"noIndex":6,"ogImage":2264,"ogUrl":2265,"ogSiteName":692,"ogType":693,"canonicalUrls":2265,"schema":2266},"Cadence is everything: 10x engineering organizations for 10x engineers","GitLab CEO and co-founder Sid Sijbrandij on the importance of cadence in engineering organizations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671909/Blog/Hero%20Images/Athlinks_running.jpg","https://about.gitlab.com/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Cadence is everything: 10x engineering organizations for 10x engineers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2022-11-03\",\n      }",{"title":2262,"description":2263,"authors":2268,"heroImage":2264,"date":2270,"body":2271,"category":14,"tags":2272},[2269],"Sid Sijbrandij","2022-11-03","\nI confess: Although I don’t believe in Bigfoot or Nessie and do believe the moon landings happened, I am convinced that despite the current orthodoxies, [10x engineers](https://svdictionary.com/words/10x-engineer) very much exist and are a major positive force for the industry, and potentially your organization.  If you can find one, convince her to work for you and keep her happy and productive (but I repeat myself).\n\nAlas, finding one is not easy, and no, job adverts stating “We only hire the best” don’t help. However, what you can do is structure your development organization in a way that makes such a person productive.  \n\nFortunately, making a 10x developer productive is pretty much the same as making your development organization productive for everyone, just dialed up to 11, particularly because an inefficient organization will affect a more efficient developer much more dramatically.\n\nUnfortunately, this state appears to be neither natural nor stable.\n\n[Effective organizations are unnatural](https://twitter.com/paulg/status/1556341452740775936?s=21&t=67hekF4Sus5tPryLdZmCHA). The natural state of organizations is bureaucracy and turf wars, and once deprived of effective leadership they revert to their natural state with shocking speed. 
Similar to organizations in general, development organizations naturally tend toward inefficiency.\n\nMore specifically, development organizations tend toward ever-lengthening cycle times just as much as organizations in general tend toward bureaucracy.  In both cases, this is always for good reasons.  This is really important:  If this tendency toward lengthening cycle times were just stupidity or laziness, it would be significantly easier to counter.  Anthropologist and historian [Joseph Tainter makes a similar point](https://www.youtube.com/watch?v=JsT9V3WQiNA) about civilizations, whose ever-increasing complexity leads to their collapse.  Here as well, the complexity is not introduced willy-nilly, but as a necessary response to problems the civilization faces.  \n\n## The sky’s the limit\n\nSoftware tends to be fairly abstract, but the principles of short cycle times are just as applicable in more down-to-earth disciplines, or should I say down-to-air?  First, one of my favorites, the story of how Paul MacCready created the Gossamer Condor to win the first Kremer Prize for human-powered flight.  More recently, Elon Musk’s SpaceX has been out-iterating NASA and the legacy spaceflight companies with results that would have seemed miraculous a couple of decades ago.  Both examples show that while other factors are obviously more important, cadence actually dominates them in short order.\n\nMacCready had come into a bit of debt due to securing a friend’s business loan, and set his eyes on the first Kremer prize for human-powered flight. This had gone unclaimed for 17 years, but not for lack of trying: There had been over 50 official attempts and all failed.  It was a Very Hard Problem we couldn’t solve, so it obviously required the most aerodynamically efficient and sophisticated designs possible.  So that’s what people did, and when their sophisticated plane inevitably crashed — after all they were working on the edge of the possible — it took them a year or more to rebuild it.\n\nMacCready approached this from the opposite angle:  He would concentrate on a plane that didn’t have to be so efficient and sophisticated, but instead would fly low and slow, be light and very repairable, aiming for 12 crashes a day. The Gossamer Condor was built out of some lightweight aluminum struts and mylar foil and could usually be repaired with Scotch tape. It was a weird contraption that didn’t look like it could fly.\n\n![The Condor](https://about.gitlab.com/images/blogimages/10x.png)\n\nWithin a few months, the team had accumulated more flights, and more crashes, than the rest of the competition combined. With all that experience, they then also understood the actual problems better than anyone else, for example, how to steer, and soon won the prize, which involved flying a mile in a figure eight.  \n\nThis wasn’t a [one-off fluke](https://www.youtube.com/watch?v=FvmTSpJU-Xc&t=3348s) either: The team went on to win the next Kremer prize as well, crossing the English Channel, then pioneered solar flight and broke the SR-71’s altitude record. The company that came out of the effort nowadays makes drones, including the successful Switchblade drones for the U.S. military that have recently been sent to help in the Ukraine conflict.\n\n## The sky’s not the limit\n\nMore recently, SpaceX has been demonstrating the efficacy of iterative development, first with the Falcon 9 rocket and now with the Starship program. 
While the latter hasn’t flown to space yet, and so may still fail completely, both the aim and the achievements so far have been breathtaking, particularly compared to NASA’s Space Launch System (SLS), which was started around the same time and is designed to have similar capabilities, lifting around 100 tons to low earth orbit.\n\nThe NASA SLS is a cost-reduced version of the Constellation program, which was canceled early after quickly outgrowing its projected $150 billion budget.  The reduced development cost of the SLS (so far $23 billion in 10 years) has been achieved by reusing not just designs, but also actual parts from the Space Shuttle program.  Not just the solid rocket boosters, but some of the main engines are the actual parts that flew on shuttles and had been mothballed by NASA.  Despite this part reuse, launches of the fully expendable rocket are predicted to cost somewhat upward of $1 billion per pop.  As of Oct. 20, there have been no flights of any of the hardware (except on space shuttles), and the first test launch scheduled for Nov. 26th will fly the full stack as designed.\n\nIn comparison, the Starship program is estimated to have cost $3 billion so far, with estimates of total development costs varying between $5 billion and $10 billion. This is for a completely new rocket, pretty much unlike any that have come before, designed for full reusability and same-day turnaround after refueling, completely new methane-burning engines, assembly-line production using relatively inexpensive materials and a projected cost target of $10 million per launch. If they work as advertised, just a few Starships could turn the entire launch capacity of planet Earth thus far into a footnote, a rounding error, and they plan to build a thousand of them. That’s why they’re building a factory for making them.\n\nIt’s anyone’s guess whether all this launch capacity, at costs two or more orders of magnitude lower than currently possible, is really for making humanity multiplanetary by establishing a Mars colony or “just” for making space-based production and asteroid mining feasible.\n\nWhen asked, [Elon Musk put it quite simply](https://www.youtube.com/watch?v=E7MQb9Y4FAE&t=333s):\n\n_“Any given technology development is how many iterations do you have and what’s your time and progress between iterations.”_\n\nThe more quickly you can iterate, the more iterations you have available.  But doesn’t iterating more quickly make the progress between iterations correspondingly less, canceling the effect?  Surprisingly, that turns out not to be the case.  Elon Musk again:\n\n_“So if you have a high production rate, you can have a lot of iterations. You can try lots of different things, and it’s OK if you blow up an engine because you’ve got a number of engines coming after that.  If you have a small number of engines then you have to be much more conservative, because you can’t risk blowing them up.”_\n\nThe higher iteration rate allows you to take more risks, which in turn allows you to push the boundaries more and thus gather more relevant feedback in each iteration, at the same time that the reduced time frame reduces what you can do. So there will be more failures. For example, engines blowing up or planes crashing.  But as long as the failures provide the information they were supposed to provide, and the individual failure modes aren’t fatal, they aren’t actually failures. You obviously don’t want to be cavalier about this, but accepting that risk allows you to push much farther per iteration. 
 Musk also mentioned this as one of the main problems of the Space Shuttle program:  They couldn’t afford to have one blow up because even the first flight was manned.\n\n“A high production rate solves many ills,” he says.\n\nIn software, the production rate is the iteration rate.  If you have lots of iterations, it’s OK if one of them was a potentially high-value experiment that didn’t pan out.  If you have one iteration per year, you are less likely to want to take that risk, and your reluctance will be justified. The willingness and ability to take risks is captured in the Extreme Programming (XP) [value of “courage.”](http://xp.c2.com/ExtremeValues.html)\n\n## Compound interest and experience\n\nThe reason this works out is mathematical.  If you iterate and actually use the feedback the iteration gives you to improve, you will improve a little bit each time because you will have learned something.  For simplicity’s sake, let’s assume an improvement of 2% per iteration.  This is like compound interest, and while it starts slow, once it ramps up, it gives outsize returns, like any exponential.\n\nImprove 2% per iteration, and after three iterations, you will have improved by 6%, which is essentially the same as a linear improvement.  After 200 iterations, however, whereas the linear approach will have improved by a respectable factor of 4, the compounding approach will have improved by more than 50x.
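\n\nIf you want to check the arithmetic, here is a quick sketch (in Python, purely for illustration) of both curves at 2% per iteration:\n\n```python\n# Linear vs. compounding improvement at 2% per iteration\nrate = 0.02\n\nfor n in (3, 200):\n    linear = rate * n               # total gain if improvements just add up\n    compound = (1 + rate) ** n - 1  # total gain if each improvement builds on the last\n    print(f\"{n} iterations: linear +{linear:.0%}, compound +{compound:.0%}\")\n\n# 3 iterations: linear +6%, compound +6%\n# 200 iterations: linear +400%, compound +5148% (about 52x)\n```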

Dick, in [How to Build a Universe that Doesn’t Fall Apart Two Days Later](https://deoxy.org/pkd_how2build.htm).\n\nIn Part 2, The Process Equation, we will look at overcoming the forces that tend to push software engineering organizations toward higher cycle times and lower cadence.\n",[1508,894],{"slug":2274,"featured":6,"template":678},"cadence-is-everything-10x-engineering-organizations-for-10x-engineers","content:en-us:blog:cadence-is-everything-10x-engineering-organizations-for-10x-engineers.yml","Cadence Is Everything 10x Engineering Organizations For 10x Engineers","en-us/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers.yml","en-us/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers",{"_path":2280,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2281,"content":2287,"config":2292,"_id":2294,"_type":16,"title":2295,"_source":17,"_file":2296,"_stem":2297,"_extension":20},"/en-us/blog/how-to-automate-testing-for-a-react-application-with-gitlab",{"title":2282,"description":2283,"ogTitle":2282,"ogDescription":2283,"noIndex":6,"ogImage":2284,"ogUrl":2285,"ogSiteName":692,"ogType":693,"canonicalUrls":2285,"schema":2286},"How to automate testing for a React application with GitLab","Learn how to add React automated tests to a GitLab CI pipeline with this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666775/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/how-to-automate-testing-for-a-react-application-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate testing for a React application with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeremy Wagner\"}],\n        \"datePublished\": \"2022-11-01\",\n      }",{"title":2282,"description":2283,"authors":2288,"heroImage":2284,"date":2289,"body":2290,"category":14,"tags":2291},[1936],"2022-11-01","\n\nReact is a popular JavaScript library for building user interfaces. In this tutorial, I'll show you \nhow to create a new React application, run unit tests as part of the CI process in GitLab, and output\nthe test results and code coverage into the pipeline.\n\n## Prerequisites\n\nFor this tutorial you will need the following:\n\n- [Node.js](https://nodejs.org/en/) >= 14.0.0 and npm >= 5.6 installed on your system\n- [Git](https://git-scm.com/) installed on your system\n- A [GitLab](https://gitlab.com/-/trial_registrations/new) account\n\n## Getting started\n\nTo get started, [create a new project in GitLab](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project).\n\nWhen you are on the \"Create new project\" screen, select \"Create blank project.\" Fill out the project information \nwith your project name and details. After you create the project, you will be taken to the project with an empty repository.\n\nNext, we will clone the repository to your local machine. 
Copy the SSH or HTTPS URL from the \"Clone\" button and run the following\ncommand in the terminal for your working directory:\n\n```\ngit clone \u003Cyour copied URL here>\n```\n\n## Create the React app\n\nYou will create a new React application by using [Create React App](https://reactjs.org/docs/create-a-new-react-app.html#create-react-app).\n\nFrom the terminal, `cd` into your newly cloned project directory and run this command:\n\n```\nnpx create-react-app .\n```\n\nThe npx CLI tool will create a new React application inside your current directory.\n\nTo run the application, run the following command in your terminal:\n\n```\nnpm run start\n```\n\nYou can view the application you created in your browser window at `http://localhost:3000`.\n\n![Create React App home page](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/create-react-app.png){: .shadow}\n\nStop your application by pressing `CTRL` + `c` in your terminal. \n\nPush your new application to GitLab by running the following commands:\n\n```\ngit add -A\ngit commit -m \"Initial creation of React application\"\ngit push\n```\n\n## Testing your application\n\nBy default, Create React App uses [Jest](https://jestjs.io/) as the test runner and ships with one unit test.\n\n```javascript\nimport { render, screen } from '@testing-library/react';\nimport App from './App';\n\ntest('renders learn react link', () => {\n  render(\u003CApp />);\n  const linkElement = screen.getByText(/learn react/i);\n  expect(linkElement).toBeInTheDocument();\n});\n```\n\nInside your `package.json`, you should see that it comes with several scripts.\n\n```javascript\n\"scripts\": {\n    \"start\": \"react-scripts start\",\n    \"build\": \"react-scripts build\",\n    \"test\": \"react-scripts test\",\n    \"eject\": \"react-scripts eject\"\n  }\n```\n\nUse the test script to run the tests in your application by running the following command:\n\n```\nnpm run test\n```\n\nWhen prompted for \"Watch Usage,\" press `a` to run all of the tests. You will see that the existing test passes and that Jest continues to watch for changes.\n\n![CLI passing tests](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/passing-test-cli.png){: .shadow}\n\nFor local development, watching for changes to run the tests is great; however, for our CI pipeline we would like to run the tests once, \ncreate a report so that we can see the results inside our pipeline, and also determine the code coverage.\n\nExit the Jest test watcher by pressing `CTRL` + `c` in your terminal. 
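\n\nAs your application grows, any test files you add under `src/` run the same way. For example, here is a second, purely illustrative test (it is not part of the Create React App scaffold) that uses the same Testing Library APIs as the default one:\n\n```javascript\nimport { render, screen } from '@testing-library/react';\nimport App from './App';\n\ntest('exposes the learn react link with a link role', () => {\n  render(\u003CApp />);\n  // The default App component renders an anchor element,\n  // so Testing Library exposes it under the \"link\" role.\n  const link = screen.getByRole('link', { name: /learn react/i });\n  expect(link).toBeInTheDocument();\n});\n```\n\n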
## Add unit test reporting and coverage\n\nTo view a unit test report, GitLab requires the runner to upload an XML file in the JUnit report format.\nWe will use `jest-junit`, a unit test reporter for Jest that creates an XML file in exactly this format, to generate it.\n\nTo install `jest-junit`, run the following command in your terminal:\n\n```\nnpm install --save-dev jest-junit\n```\n\nNow, add a new script to run the unit tests inside your CI pipeline.\nAdd a `test:ci` script to your `package.json` that looks like this:\n\n```javascript\n\"scripts\": {\n    \"start\": \"react-scripts start\",\n    \"build\": \"react-scripts build\",\n    \"test\": \"react-scripts test\",\n    \"eject\": \"react-scripts eject\",\n    \"test:ci\": \"npm run test -- --testResultsProcessor=\\\"jest-junit\\\" --watchAll=false --ci --coverage\"\n  },\n```\n\n`--testResultsProcessor=\\\"jest-junit\\\"` tells Jest to use the `jest-junit` library to create a unit test \nreport. `--watchAll=false` disables watch mode so that the tests will not rerun when something changes. `--ci` tells \nJest that it is running in a CI environment. `--coverage` tells Jest that test coverage information should be collected \nand reported in the output. For more information on these options, visit the [Jest CLI options](https://jestjs.io/docs/cli) documentation.\n\nIf you run the new `test:ci` script, it will run the tests, create an XML file named `junit.xml`, and print coverage statistics to the CLI.\n\n![CLI code coverage](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/coverage-cli.png){: .shadow}
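\n\nBy default, `jest-junit` writes `junit.xml` to the project root. If you would rather give the report a different name or directory, the reporter can be configured through a `jest-junit` key in `package.json`. A minimal sketch, using the `outputDirectory` and `outputName` options from the jest-junit README (the values here are only examples):\n\n```javascript\n\"jest-junit\": {\n  \"outputDirectory\": \"reports\",\n  \"outputName\": \"junit.xml\"\n}\n```\n\nIf you do relocate the report, use the same path in the pipeline configuration that follows.\n\n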
## Add unit tests to your CI pipeline\n\nIn the root of your application, create a file named `.gitlab-ci.yml`. \n\nDefine a test stage for your pipeline by adding the following code to your `.gitlab-ci.yml` file:\n\n```\nstages:\n  - test\n```\n\nNext, add a job named `unit-test` that will be responsible for running the unit tests in the test stage. Add the following code below the\ndefined stages:\n\n```\nunit-test:\n  image: node:latest\n  stage: test\n  before_script:\n    - npm install\n  script:\n    - npm run test:ci\n  coverage: /All files[^|]*\\|[^|]*\\s+([\\d\\.]+)/\n  artifacts:\n    paths:\n      - coverage/\n    when: always\n    reports:\n      junit:\n        - junit.xml\n```\n\nYour complete `.gitlab-ci.yml` file should look like this:\n\n```\nstages:\n  - test\n\nunit-test:\n  image: node:latest\n  stage: test\n  before_script:\n    - npm install\n  script:\n    - npm run test:ci\n  coverage: /All files[^|]*\\|[^|]*\\s+([\\d\\.]+)/\n  artifacts:\n    paths:\n      - coverage/\n    when: always\n    reports:\n      junit:\n        - junit.xml\n```
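\n\nOne optional improvement, not required for the rest of this tutorial: because the job runs `npm install` on every pipeline, you could ask GitLab to cache `node_modules/` between runs using the standard [cache keyword](https://docs.gitlab.com/ee/ci/yaml/#cache). A minimal sketch:\n\n```\nunit-test:\n  # ...the same job definition as above...\n  cache:\n    key: $CI_COMMIT_REF_NAME\n    paths:\n      - node_modules/\n```\n\nThis keys the cache to the branch name, so repeated pipelines on the same branch can skip most of the dependency download.\n\n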
Before we push these changes to GitLab, add the following line to your `.gitignore`:\n\n```\njunit.xml\n```\n\nAdd your changes to GitLab by running these commands in your terminal:\n\n```\ngit add -A\ngit commit -m \"Adds .gitlab-ci.yml with unit testing\"\ngit push\n```\n\nWhen this command finishes, your code will be pushed to your project in GitLab and a pipeline will start \nautomatically running the `unit-test` job we defined earlier.\n\n![CI pipeline running](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-starting.png){: .shadow}\n\nWhen the pipeline completes, click the pipeline ID (_#680073569 in this case_).\n\nInside the pipeline, click the _Jobs_ tab and you should see that the coverage for the unit-test job is 8.33%.\n\n![CI pipeline coverage](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-coverage.png){: .shadow}\n\nClick the _Tests_ tab and you can see the testing results for the unit-test job. \n\n![CI pipeline tests](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-tests.png){: .shadow}\n\nClick the name of the job, _unit-test_, and you will see the status for each of the test suites run.\n\n![CI pipeline test details](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-test-details.png){: .shadow}\n\nCongratulations! You just added automated tests for your React application to your CI pipeline inside GitLab and output the results to the pipeline.\n\nAll code for this tutorial can be found in this [project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/react-app).\n\nCover image by [Lautaro Andreani](https://unsplash.com/@lautaroandreani?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/react?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n## Related Posts\n- [The GitLab guide to modern software testing](https://about.gitlab.com/blog/the-gitlab-guide-to-modern-software-testing/)\n- [Unit Test Reports](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html)\n- [coverage keyword](https://docs.gitlab.com/ee/ci/yaml/#coverage)\n",[1328,894,110],{"slug":2293,"featured":6,"template":678},"how-to-automate-testing-for-a-react-application-with-gitlab","content:en-us:blog:how-to-automate-testing-for-a-react-application-with-gitlab.yml","How To Automate Testing For A React Application With Gitlab","en-us/blog/how-to-automate-testing-for-a-react-application-with-gitlab.yml","en-us/blog/how-to-automate-testing-for-a-react-application-with-gitlab",{"_path":2299,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2300,"content":2306,"config":2311,"_id":2313,"_type":16,"title":2314,"_source":17,"_file":2315,"_stem":2316,"_extension":20},"/en-us/blog/publishing-an-astro-site-with-pages",{"title":2301,"description":2302,"ogTitle":2301,"ogDescription":2302,"noIndex":6,"ogImage":2303,"ogUrl":2304,"ogSiteName":692,"ogType":693,"canonicalUrls":2304,"schema":2305},"How to publish your Astro Site with GitLab Pages","Learn how to deploy an Astro Site with GitLab Pages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682473/Blog/Hero%20Images/shot-by-cerqueira-0o_GEzyargo-unsplash.jpg","https://about.gitlab.com/blog/publishing-an-astro-site-with-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to publish your Astro Site with GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Janis Altherr\"}],\n        \"datePublished\": \"2022-10-24\",\n      }",{"title":2301,"description":2302,"authors":2307,"heroImage":2303,"date":2308,"body":2309,"category":14,"tags":2310},[2190],"2022-10-24","\n\nAstro is an amazing new framework for creating content-focused static sites, and GitLab Pages is a great way to deploy a site built with Astro. Here's a step-by-step guide on how to build and deploy an Astro Site with GitLab Pages.\n\n## Create the project locally\n\nFirst, create the Astro Project locally using the Astro CLI.\n\nNote: Even though we're offering a [project template](https://gitlab.com/pages/astro),\nwe recommend using the CLI locally to scaffold your project. This ensures you can create your project with the latest defaults.\n\n```shell\nnpm create astro@latest\n```\n\nNow follow the CLI instructions. As part of the setup, Astro will create the\nproject folder for you. During setup, Astro will ask whether you'd like to initialize a new Git repository. Answer this with `y` (yes).\n\nOnce the Astro CLI is done scaffolding your project, `cd` into the new folder:\n\n```shell\ncd \u003Cyour-project>\n```\n\n## Configure Astro for GitLab Pages\n\nAstro comes with a few defaults that are incompatible with GitLab Pages. 
So before continuing, we need to set up a compatible config.\nEdit your `astro.config.mjs` to include the following:\n\n```javascript\n// astro.config.mjs\nimport { defineConfig } from 'astro/config';\n\n// https://astro.build/config\nexport default defineConfig({\n  // GitLab Pages requires exposed files to be located in a folder called \"public\".\n  // So we're instructing Astro to put the static build output in a folder of that name.\n  outDir: 'public',\n\n  // The folder name Astro uses for static files (`public`) is already reserved\n  // for the build output. So in deviation from the defaults we're using a folder\n  // called `static` instead.\n  publicDir: 'static',\n});\n```\n\nWhy are we doing this? GitLab Pages is a way to publish some files in a\nrepository, no matter what build tool you used to generate them. Unlike with\nother deployment tools, the exposed files and the source code can live \ntogether in one place. So, to ensure you don't accidentally expose sensitive\nfiles, we require you to consciously put the files you want published into a\nfolder named \"public\".\n\nBy default, Astro uses `public` for something different – the static \nassets. So we have to change that behavior. The above config tells Astro\nthat we'll put the static files in a folder named `static` and want the _output_\nfiles to be put in a folder named, as required, `public`.\n\nAstro already generated that assets folder under the old name while\nscaffolding, so we'll have to rename it. Inside your Astro project folder, run:\n\n```shell\nmv public static\n```\n\nDepending on your project configuration, GitLab Pages will deploy your site \nat a URL similar to `https://\u003Cuser-or-group>.gitlab.io/\u003Cproject-name>`. If you want to use the default URL, you need to adjust Astro\nto the fact that the site is not mounted at the root path; otherwise, it may \nnot load static assets (such as the CSS files) correctly. \n\n[Visit the documentation](https://docs.gitlab.com/ee/user/project/pages/getting_started_part_one.html#gitlab-pages-default-domain-names)\nto find out the URL schema of the project you intend to create, then add the\nfollowing line to your `astro.config.mjs` (skip this step if you're creating\na user or group page):\n\n```javascript\n// astro.config.mjs\nexport default defineConfig({\n  // ...\n  base: '/\u003Cproject-name>'\n  // In case the project is owned by a subgroup, use:\n  // base: '/\u003Csubgroup>/\u003Cproject-name>'\n});\n```\n\nAstro [recommends](https://docs.astro.build/en/reference/configuration-reference/#site) \nadding the final site's full URL to generate the sitemap, so add it now to your\n`astro.config.mjs`:\n\n```javascript\n// astro.config.mjs\nexport default defineConfig({\n  // ...\n  site: 'https://\u003Cuser-or-group>.gitlab.io'\n  \n  // Note: Instead of specifying both `base` and `site`, you can simply\n  // use the full URL here:\n  // site: 'https://\u003Cuser-or-group>.gitlab.io/\u003Cproject-name>'\n  // or for pages owned by a subgroup:\n  // site: 'https://\u003Cgroup>.gitlab.io/\u003Csubgroup>/\u003Cproject-name>'\n});\n```\n\nNow that you've successfully configured your project, you can commit your\nchanges.\n\n```shell\ngit add -A\ngit commit -m \"Initial commit\"\n```\n\n## Set up the remote repository\n\nYou can't push the code yet, because we have yet to set up the remote repository. Visit\nGitLab and create a new project. When asked, select \"Create blank project.\"\n\nIn the setup screen, select \"GitLab Pages\" as the deployment target. 
Choose the\nvisibility level however you like. This is mainly asking whether your source \ncode is public, although it does affect the initial visibility of your Pages site (see \"Making a \nprivate project's site public\" below).\n\nMake sure you clear the checkbox next to \"Initialize repository with a README\",\notherwise GitLab will begin a new Git history that you will have to reconcile\nwith your existing local one.\n\nOnce the project is set up, follow the instructions for pushing an _existing\nrepository_. Since you don't have an existing remote yet, you can just run:\n\n```shell\ngit remote add origin \u003Cgit-project-url>\ngit push -u origin --all\n```\n\nNow that you've synced your local code with GitLab, let's finish publishing it with\nPages.\n\n## Create a Pages pipeline\n\nIn GitLab, go to your project's settings and select Pages. You will be welcomed\nby a screen that helps you build a `.gitlab-ci.yml` file.\n\n![Screenshot: The \"Get started with Pages\" UI](https://about.gitlab.com/images/blogimages/astro-pages/wizard_step_1.png)\n\nEnter \"node:lts\" as the build image. This will give you the latest Node.js \nenvironment with long-term support.\n\nWe've already configured Astro to output our files in a folder named `public`,\nso you can check the checkbox asking you to confirm this.\n\nOn the next page, enter `npm ci` as the installation step. Running `npm ci` \ninstead of `npm install` is recommended for CI environments such as GitLab\nPipelines, as it uses the `package-lock.json` to match the installed versions \nwith the ones you used during development. See the [npm documentation](https://docs.npmjs.com/cli/v8/commands/npm-ci)\nto learn more about `npm ci`.\n\n![Screenshot: Inputting the installation step](https://about.gitlab.com/images/blogimages/astro-pages/wizard_step_2.png)\n\nOn the last page, enter the build command \"npm run build\" and click \"next\" again.\n\n![Screenshot: Inputting the build step](https://about.gitlab.com/images/blogimages/astro-pages/wizard_step_3.png)\n\nNext to the inputs you see the pipeline file that has been built for you. \nThis is the one we want to add to the repository to enable Pages.\n\n![Screenshot: The finished file and the commit step](https://about.gitlab.com/images/blogimages/astro-pages/wizard_step_4.png)\n\nHow does it work in detail? If GitLab sees a job named `pages`, it will \nlook for artifacts inside a root folder `public` and then create a \nGitLab Pages deployment from it.\n\nThe `rules` section ensures the pages deployment is only triggered by \ncommits to the default branch. Every time you push a change to your default \nbranch, Pages will publish the new changes. 
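\n\nFor reference, the generated file should look roughly like this. This is a sketch assembled from the inputs described above (the `node:lts` image, `npm ci`, `npm run build`, the `public` artifacts folder, and the default-branch rule); the exact file the wizard produces may differ slightly between GitLab versions:\n\n```yaml\npages:\n  image: node:lts\n  script:\n    - npm ci\n    - npm run build\n  artifacts:\n    paths:\n      - public\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\n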
If you're happy with the pipeline, enter a commit message and click \"commit\".\n(Make sure you run `git pull` locally before making any more changes, to \nprevent issues with diverging histories.)\n\nNow that you've added a commit with a `.gitlab-ci.yml` file, GitLab has kicked off\na pipeline. Visit CI/CD > Pipelines to see the progress. After a couple of \nminutes, you should see that the pipeline has succeeded. (If it's showing \"failed\", \nclick on the status button to see the job logs.)\n\n![Screenshot: Pipelines](https://about.gitlab.com/images/blogimages/astro-pages/pipeline_overview.png)\n\nOnce the pipeline has completed, go back to Settings > Pages. You should now see\nthe various settings of your site, including your new site's URL. Click on \nit and, congratulations, you've just deployed your Astro Site with GitLab \nPages!\n\n![Screenshot: The deployed page](https://about.gitlab.com/images/blogimages/astro-pages/deployed_site.png)\n\n## Making a private project's site public\n\nBy default, a private project's Pages site is only accessible to project \nmembers. If you want your source code to be private, but still have a public \nsite, go to Settings > General, expand \"Visibility, project features, permissions\", scroll down to \"Pages\" and set \nit to \"Everyone\".\n\n## Keep reading\n\n- [Tutorial: Use the GitLab UI to deploy your static site](https://docs.gitlab.com/ee/user/project/pages/getting_started/pages_ui.html)\n- [Astro Docs: Deploy your Astro Site to GitLab Pages](https://docs.astro.build/en/guides/deploy/gitlab/)\n- [Watch a video on how to create a Pages Pipeline with the Wizard](https://youtu.be/49hgxqPGofw)\n",[726,232,675],{"slug":2312,"featured":6,"template":678},"publishing-an-astro-site-with-pages","content:en-us:blog:publishing-an-astro-site-with-pages.yml","Publishing An Astro Site With Pages","en-us/blog/publishing-an-astro-site-with-pages.yml","en-us/blog/publishing-an-astro-site-with-pages",{"_path":2318,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2319,"content":2325,"config":2332,"_id":2334,"_type":16,"title":2335,"_source":17,"_file":2336,"_stem":2337,"_extension":20},"/en-us/blog/take-advantage-of-git-rebase",{"title":2320,"description":2321,"ogTitle":2320,"ogDescription":2321,"noIndex":6,"ogImage":2322,"ogUrl":2323,"ogSiteName":692,"ogType":693,"canonicalUrls":2323,"schema":2324},"Take advantage of Git rebase","Tap into the Git rebase features to improve your workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665560/Blog/Hero%20Images/speedmonorepo.jpg","https://about.gitlab.com/blog/take-advantage-of-git-rebase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Take advantage of Git rebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Couder\"}],\n        \"datePublished\": \"2022-10-06\",\n      }",{"title":2320,"description":2321,"authors":2326,"heroImage":2322,"date":2328,"body":2329,"category":14,"tags":2330},[2327],"Christian Couder","2022-10-06","\n\nThese days, developers spend a lot of time reviewing merge requests\nand taking these reviews into account to improve the code. We'll discuss how\n[Git rebase](https://git-scm.com/docs/git-rebase) can help in\nspeeding up these review cycles. But first, let's take a look at some\nworkflow considerations.\n\n## Different ways to rework a merge request\n\nA developer who worked on some code changes and created a merge\nrequest with these changes will often have to rework them. Why does\nthis happen? 
Tests can fail, bugs are found, or reviewers suggest\nimprovements and find shortcomings.\n\n### Simple but messy method: add more commits\n\nOne way to rework the code changes is to make more changes in some new\ncommits on top of the branch that was used to create the merge\nrequest, and then push the branch again to update the merge\nrequest.\n\nWhen a number of commits have been added in this way, the merge\nrequest becomes problematic:\n\n- It's difficult to review by looking at all the changes together.\n- It's difficult to review the commits separately as they may contain different unrelated changes, or even multiple reworks of the same code.\n\nReviewers find it easier to review changes split into a number of small,\nself-contained commits that can be reviewed individually.\n\n### Pro method: rebase!\n\nA better method to prepare or rework a merge request is to always\nensure that each commit contains small, self-contained, easy-to-review\nchanges.\n\nThis means that all the commits in the branch may need reworking\ninstead of stacking on yet more commits. This approach might seem much\nmore complex and tedious, but `git rebase` comes to the rescue!\n\n## Rework your commits with `git rebase`\n\nIf your goal is to build a merge request from a series of small,\nself-contained commits, your branch may need significant rework before its\ncommits are good enough. When the commits are ready, you can push the branch\nand update or create a merge request with this branch.\n\n### Start an interactive rebase\n\nIf your branch is based on `main`, the command to rework your branch\nis:\n\n```plaintext\ngit rebase -i main\n```\n\nI encourage you to create [a Git alias](https://git-scm.com/book/en/v2/Git-Basics-Git-Aliases),\nor a shell alias or function for this command right away, as you will\nuse it very often.\n\nThe `-i` option passed to `git rebase` is an alias for\n`--interactive`. It starts\n[an 'interactive' rebase](https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---interactive)\nwhich will open your editor. In it, you will find a list of the\ncommits in your branch followed by commented-out lines beginning with\n`#`. The list of commits looks like this:\n\n```plaintext\npick 1aac632db2 first commit subject\npick a385014ad4 second commit subject\npick 6af12a88cf other commit subject\npick 5cd121e2a1 last commit subject\n```\n\nThese lines are instructions for how `git rebase` should handle these\ncommits. The commits are listed in chronological order, with the\noldest commit at the top. (This order is the opposite of the default\n`git log` order.) What do these lines contain?\n\n- An instruction (here, `pick`) that tells Git what action to take\n- An abbreviated commit ID\n- A commit subject to help you identify the commit contents\n\n### Edit the instruction list\n\nYou can edit these instructions! 
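\n\nFor example, reusing the four commits above, an edited list that rewords the first commit's message and melds the third commit into the second might look like this (each action is explained below):\n\n```plaintext\nreword 1aac632db2 first commit subject\npick a385014ad4 second commit subject\nfixup 6af12a88cf other commit subject\npick 5cd121e2a1 last commit subject\n```\n\n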
When you quit your text editor, `git rebase`\nreads the instructions you've just edited, and performs them\nin sequence to recreate your branch the way you want.\n\nAfter the instructions for all commits, a set of commented-out lines\nexplains how to edit the instruction lines, and how each instruction\nwill change your branch:\n\n- If you **delete a commit's entire instruction line** from the list,\n  that commit won't be recreated.\n- If you **reorder the instruction lines**, the commits will be\n  recreated in the order you specify.\n- If you **change the action** from `pick` to something else, such as\n  `squash` or `reword`, Git performs the action you specify on that\n  commit.\n- You can even **add new instruction lines** before, after, or between\n  existing lines.\n\nIf the comment lines aren't enough, more information about what you\ncan do and how it works is available in:\n\n- The [Git Tools - Rewriting History](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History)\n  section of the \"Pro Git\" book\n- The [Interactive mode](https://git-scm.com/docs/git-rebase#_interactive_mode)\n  section of the `git rebase` documentation\n\n### Continue or abort the rebase\n\nAn interactive rebase can stop if there is a conflict (as a regular\nrebase would) or if you used an instruction like `edit` in an\ninstruction line. This allows you to make some changes, like splitting\nthe current commit into two commits, or fixing the rebase conflict if\nthere is one. You can then either:\n\n- Continue the interactive rebase with `git rebase --continue`.\n- Abort the rebase altogether with `git rebase --abort`.\n\n(These `git rebase` options also work when a regular, non-interactive\nrebase stops.)\n\n## Further tips and benefits\n\n### Try different instructions\n\nI recommend you try out the different instructions you can use in\neach instruction line, especially `reword`, `edit`, `squash`, and `fixup`. You'll\nsoon want to use the abbreviated versions of these instructions: `r`,\n`e`, `s`, and `f`.\n\n### Run shell commands in your rebase\n\nYou might also have noticed an `exec \u003Ccommand>` instruction that\nallows you to run any shell command at any point in the interactive rebase.\nI've found it more useful for non-interactive rebases, such as:\n\n```plaintext\ngit rebase --exec 'make test' main\n```\n\n(This is not an interactive rebase, because the command doesn't contain the `-i` flag.)\n\nThe `--exec \u003Ccommand>` flag allows you to run any shell command after\neach rebased commit, stopping if the shell command fails (which is\nsignaled by a non-zero exit code).\n\n### Test all your commits\n\nPassing a command to build your software and run its tests, like\n`make test`, to `--exec` will check that each commit in your branch\nbuilds correctly and passes your tests.\n\nIf `make test` fails, the rebase stops. You can then fix the current\ncommit right away, and continue the rebase to test the next\ncommits.\n\nChecking that each commit builds cleanly and passes all the tests ensures\nyour code base is always in a good state. It's especially useful if\nyou want to take advantage of\n[Git bisect](https://git-scm.com/docs/git-bisect) when you encounter\nregressions.\n\n## Conclusion\n\nIn Git, a rebase is a very versatile and useful tool to rework\ncommits. Use it to achieve a workflow with high-quality changes\nproposed in high-quality commits and merge requests. It makes your\ndevelopers and reviewers more efficient. 
Code reviews and debugging also become easier and more effective.\n\n**EDIT:** Check out our [follow-up post on how you can apply this in real life](/blog/rebase-in-real-life/).\n",[702,727,2331,726],"releases",{"slug":2333,"featured":6,"template":678},"take-advantage-of-git-rebase","content:en-us:blog:take-advantage-of-git-rebase.yml","Take Advantage Of Git Rebase","en-us/blog/take-advantage-of-git-rebase.yml","en-us/blog/take-advantage-of-git-rebase",{"_path":2339,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2340,"content":2346,"config":2351,"_id":2353,"_type":16,"title":2354,"_source":17,"_file":2355,"_stem":2356,"_extension":20},"/en-us/blog/amazon-linux-2-service-ready-partner",{"title":2341,"description":2342,"ogTitle":2341,"ogDescription":2342,"noIndex":6,"ogImage":2343,"ogUrl":2344,"ogSiteName":692,"ogType":693,"canonicalUrls":2344,"schema":2345},"GitLab is now an Amazon Linux 2 Service Ready Partner","Being an Amazon Linux 2 Service Ready partner shows GitLab's strong commitment to AWS linux distributions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682451/Blog/Hero%20Images/isis-franca-hsPFuudRg5I-unsplash.jpg","https://about.gitlab.com/blog/amazon-linux-2-service-ready-partner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is now an Amazon Linux 2 Service Ready Partner\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-09-21\",\n      }",{"title":2341,"description":2342,"authors":2347,"heroImage":2343,"date":2348,"body":2349,"category":14,"tags":2350},[1701],"2022-09-21","\n\nSeveral months ago, we shared that GitLab started officially supporting Amazon Linux 2 as well as providing packages for GitLab and GitLab Runner for x86 and Graviton ARM architectures.\n\nGitLab’s hardworking Enablement Engineering team has taken this commitment to the next level by acquiring Amazon’s Service Ready Partner designation for Amazon Linux 2.\n\nThe AWS Service Ready program requires that GitLab provide specific evidence regarding support, compatibility testing, and security testing, so that customers can deploy GitLab on Amazon Linux 2 with confidence.\n\nHere is GitLab’s [Amazon Linux 2 Service Ready Partner listing](https://aws.amazon.com/amazon-linux-2/partners/?partner-solutions-cards.sort-by=item.additionalFields.partnerNameLower&partner-solutions-cards.sort-order=asc&awsf.partner-solutions-filter-partner-type=*all&partner-solutions-cards.q=GitLab&partner-solutions-cards.q_operator=AND).\n\n## Amazon Linux 2 support in GitLab 15.0\n\nAmazon Linux 2 is supported in GitLab 15.0 and later. An [earlier blog](/blog/amazon-linux-2-support-and-distro-specific-packages/) discusses a variety of important points and provides some code to help plan a smooth transition.\n\nThe Service Ready designation was received for version 15.3, but no changes were made to the process between 15.0 and 15.3 to support the designation.\n\nGitLab Runner has had ARM64 binaries since 12.6.0 and now has Amazon Linux 2 RPM packages for those wanting package-based installs.\n\n## Inside the distribution team process for distribution support\n\nIt would be easy to think that adding support for additional Linux distros is a simple and easy process - but a lot of effort actually goes into it. 
GitLab’s Distribution Team uses GitLab itself to apply full DevOps disciplines to the continuous building, testing, and distribution of packages for Amazon Linux. Here are just some of the steps in preparing a GitLab release for packaging:\n\n- Create an elastically scaling, distro-specific CI build environment.\n- Create a distro-specific CI test environment.\n- Run 2,380 compatibility tests on the GitLab code base.\n- Complete SAST and dependency security scanning, applying a specific escalation procedure to any vulnerabilities that are found.\n- Prepare primary distributions, such as distro-specific .deb and .rpm packages, specifically for each distribution.\n- Produce secondary distributions as well - this is when the official GitLab AMI is created.\n- Run CI builds and testing multiple times a week for Amazon Linux.\n\n![Amazon Linux 2 Test Results](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/al2testsubgroups.png)\n\n![Amazon Linux 2 Test Results](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/al2tests.png)\n\n## Need-to-know takeaways\n\n- GitLab is now an official Amazon Linux 2 Service Ready Partner.\n- Amazon Linux 2 RPM packages are available for GitLab from version 15.0 and for GitLab Runner.\n\n> **Note**\n> This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc.\n\n![AWS Partner Logo](https://about.gitlab.com/images/blogimages/2022-09-amazonlinux/amazonlinuxandgravitonready.png){: .right}\n",[771,283,894],{"slug":2352,"featured":6,"template":678},"amazon-linux-2-service-ready-partner","content:en-us:blog:amazon-linux-2-service-ready-partner.yml","Amazon Linux 2 Service Ready Partner","en-us/blog/amazon-linux-2-service-ready-partner.yml","en-us/blog/amazon-linux-2-service-ready-partner",{"_path":2358,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2359,"content":2365,"config":2371,"_id":2373,"_type":16,"title":2374,"_source":17,"_file":2375,"_stem":2376,"_extension":20},"/en-us/blog/a-visual-guide-to-gitlab-ci-caching",{"title":2360,"description":2361,"ogTitle":2360,"ogDescription":2361,"noIndex":6,"ogImage":2362,"ogUrl":2363,"ogSiteName":692,"ogType":693,"canonicalUrls":2363,"schema":2364},"A visual guide to GitLab CI/CD caching","Learn cache types, as well as when and how to use them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682443/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/a-visual-guide-to-gitlab-ci-caching","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A visual guide to GitLab CI/CD caching\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matthieu Fronton\"}],\n        \"datePublished\": \"2022-09-12\",\n      }",{"title":2360,"description":2361,"authors":2366,"heroImage":2362,"date":2368,"body":2369,"category":14,"tags":2370},[2367],"Matthieu Fronton","2022-09-12","\n\nIf you've ever worked with GitLab CI/CD, you may have needed, at some point, to use a cache to share content between jobs. 
The decentralized nature of GitLab CI/CD is a strength, but it can confuse even the best of us when we try to wire everything together. For instance, we need to know critical information such as the difference between artifacts and cache, and where and how to configure each.\n\nThis visual guide will help with both challenges.\n\n## Cache vs. artifacts\n\nThe concepts _may_ seem to overlap because they are both about sharing content between jobs, but they are actually fundamentally different:\n\n- If your job does not rely on the previous one (i.e., it can produce the content by itself, but will run faster if the content already exists), then use cache.\n- If your job does rely on the output of the previous one (i.e., it cannot produce that output by itself), then use artifacts and dependencies.\n\nHere is a simple sentence to remember if you struggle to choose between cache and artifacts:\n> Cache is here to speed up your job but it may not exist, so don't rely on it.\n\nThis article will focus on **cache**.\n\n## Initial setup\n\nWe'll go with a simple representation of the GitLab CI/CD pipelining model and ignore (for now) that the jobs can be executed on any runners and hosts. It will help us get the basics down.\n\nLet's say you have:\n- 1 project with 3 branches\n- 1 host running 2 Docker runners\n\n![Initial setup](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-1.png){: .shadow.center}\n\n## Local cache: Docker volume\n\nIf you want a [local cache](https://docs.gitlab.com/ee/ci/caching/index.html#where-the-caches-are-stored) between all your jobs running on the same runner, use the [cache statement](https://docs.gitlab.com/ee/ci/yaml/#cache) in your `.gitlab-ci.yml`:\n\n```yaml\ndefault:\n  cache:\n    paths:\n      - relative/path/to/folder/*.ext\n      - relative/path/to/another_folder/\n      - relative/path/to/file\n```\n\n![local / container / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-2.png){: .shadow.center}\n\nUsing the [predefined variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) `CI_COMMIT_REF_NAME` as the [cache key](https://docs.gitlab.com/ee/ci/yaml/index.html#cachekey), you can ensure the cache is tied to a specific branch:\n\n```yaml\ndefault:\n  cache:\n    key: $CI_COMMIT_REF_NAME\n    paths:\n      - relative/path/to/folder/*.ext\n      - relative/path/to/another_folder/\n      - relative/path/to/file\n```\n\n![local / container / one branch / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-3.png){: .shadow.center}\n\nUsing the [predefined variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) `CI_JOB_NAME` as the [cache key](https://docs.gitlab.com/ee/ci/yaml/index.html#cachekey), you can ensure the cache is tied to a specific job:
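\n\nFollowing the same pattern as the branch-specific example above, a sketch of that per-job cache (only the `key` changes; the `paths` list stays the same):\n\n```yaml\ndefault:\n  cache:\n    key: $CI_JOB_NAME\n    paths:\n      - relative/path/to/folder/*.ext\n      - relative/path/to/another_folder/\n      - relative/path/to/file\n```\n\n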
![local / container / all branches / one job](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-4.png){: .shadow.center}\n\n## Local cache: Bind mount\n\nIf you don't want to use a volume for caching (to make debugging easier, to clean up disk space more easily, etc.), you can configure a [bind mount for Docker volumes](https://docs.docker.com/storage/bind-mounts/) while registering the runner. With this setup, you do not need the [cache statement](https://docs.gitlab.com/ee/ci/yaml/#cache) in your `.gitlab-ci.yml`:\n\n```shell\n#!/bin/bash\n\ngitlab-runner register                             \\\n  --name=\"Bind-Mount Runner\"                       \\\n  --docker-volumes=\"/host/path:/container/path:rw\" \\\n...\n```\n\n![local / one runner / one host / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-5.png){: .shadow.center}\n\nIn fact, this setup even allows you to share a cache between jobs running on the same host without requiring you to set up a distributed cache (which we'll talk about later):\n\n```shell\n#!/bin/bash\n\ngitlab-runner register                             \\\n  --name=\"Bind-Mount Runner X\"                     \\\n  --docker-volumes=\"/host/path:/container/path:rw\" \\\n...\n\ngitlab-runner register                                 \\\n  --name=\"Bind-Mount Runner Y\"                         \\\n  --docker-volumes=\"/host/path:/container/alt/path:rw\" \\\n...\n```\n\n![local / multiple runners / one host / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-6.png){: .shadow.center}\n\n## Distributed cache\n\nIf you want to have a [shared cache](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching) between all your jobs running on multiple runners and hosts, use the [`[runners.cache]` section](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runnerscache-section) in your `config.toml`:\n\n```toml\n[[runners]]\n  name = \"Distributed-Cache Runner\"\n...\n  [runners.cache]\n    Type = \"s3\"\n    Path = \"bucket/path/prefix\"\n    Shared = true\n    [runners.cache.s3]\n      ServerAddress = \"s3.amazonaws.com\"\n      AccessKey = \"\u003Cchangeme>\"\n      SecretKey = \"\u003Cchangeme>\"\n      BucketName = \"foobar\"\n      BucketLocation = \"us-east-1\"\n```\n\n![remote / multiple runners / multiple hosts / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-7.png){: .shadow.center}\n\nUsing the predefined variable `CI_COMMIT_REF_NAME` as the cache key, you can ensure the cache is tied to a specific branch across multiple runners and hosts:\n\n![remote / multiple runners / multiple hosts / one branch / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-8.png){: .shadow.center}\n\n## Real-life setup\n\nThe simplified setups above helped you build an understanding of the concepts and possibilities.\n\nIn real life, you'll face more complex wiring, and we hope this article will serve as a visual cheatsheet alongside the reference documentation.\n\nJust to give you a sneak peek, here is an exercise for you:\n\n- Set up a cache between all the jobs of a specific stage, running on any runner and any host, but only between pipelines of the same branch:\n\n![Real-life test assignment](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-9.png){: .shadow.center}\n\nHappy caching, folks!\n\nCover image by [Alina Grubnyak](https://unsplash.com/@alinnnaaaa) on [Unsplash](https://unsplash.com)\n{: .note}\n",[832,937,894,726],{"slug":2372,"featured":6,"template":678},"a-visual-guide-to-gitlab-ci-caching","content:en-us:blog:a-visual-guide-to-gitlab-ci-caching.yml","A Visual Guide To Gitlab Ci 
Caching","en-us/blog/a-visual-guide-to-gitlab-ci-caching.yml","en-us/blog/a-visual-guide-to-gitlab-ci-caching",{"_path":2378,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2379,"content":2384,"config":2390,"_id":2392,"_type":16,"title":2393,"_source":17,"_file":2394,"_stem":2395,"_extension":20},"/en-us/blog/speed-up-your-monorepo-workflow-in-git",{"title":2380,"description":2381,"ogTitle":2380,"ogDescription":2381,"noIndex":6,"ogImage":2322,"ogUrl":2382,"ogSiteName":692,"ogType":693,"canonicalUrls":2382,"schema":2383},"Speed up your monorepo workflow in Git","Tap into the features that can reap huge savings in the long run for any developer team.","https://about.gitlab.com/blog/speed-up-your-monorepo-workflow-in-git","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up your monorepo workflow in Git\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Cai\"}],\n        \"datePublished\": \"2022-09-06\",\n      }",{"title":2380,"description":2381,"authors":2385,"heroImage":2322,"date":2387,"body":2388,"category":14,"tags":2389},[2386],"John Cai","2022-09-06","\n\nMonorepos have grown in popularity in recent years. For many of us, they are a\npart of our daily Git workflows. The trouble is, working with them can be slow. Speeding up\na developer's workflow can reap huge savings in the long run for any team.\n\nFirst, a word about monorepos. What does it mean for a repository to be a\nmonorepo anyway? Well, it depends on who you ask, and the definition has become\nmore flexible over time, but here are a few common characteristics.\n\n## Characteristics of monorepos\n\nMonorepos typically have the following characteristics.\n\n### Multiple sub-projects\n\nThe typical definition of \"monorepo\" is a repository that contains multiple sub-projects. For instance, let's imagine a repository with a web-facing front end,\na backend, an iOS app directory, and an Android app directory:\n\n```\nawesome-app/\n|\n|--backend/\n|\n|--web-frontend/\n|\n|--app-ios/\n|\n|--app-android/\n\n```\n\n`awesome-app` is a single repository:\n\n```\ngit clone https://my-favorite-git-hosting-service.com/awesome-app.git\n```\n\nThe [Chromium](https://github.com/chromium/chromium) repository is a good\nexample of this.\n\n### Large files\n\nRepositories can also grow to be very large if large files are checked in. In\nsome cases, binaries or other large assets such as images are checked into the\nrepository to have their history tracked. Other times, large files are inadvertently \nintroduced into the repository. The way Git history works, even if these files are\nimmediately removed, the single version that was checked in remains.\n\n### Old projects with deep histories\n\nWhile Git is very good at compressing text files, when a Git repository has a deep history,\nthe need to keep all versions of a file around can cause the size of the repository to be huge.\n\nThe [Linux](https://github.com/torvalds/linux) repository is a good example of this.\n\nFor instance, the Linux project's first Git commit is from [April 2005](https://github.com/torvalds/linux/commit/1da177e4c3f41524e886b7f1b8a0c1fc7321cac2).\n\nAnd a `git rev-list --all --count` gives us 1,120,826 commits! That's a lot of\nhistory! Getting into Git internals a little bit, Git keeps a commit object and a\ntree object for each commit, as well as a copy of the files at that snapshot\nin history. 
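\n\nYou can inspect these objects yourself: `git cat-file -p HEAD` pretty-prints the commit object at the tip of the current branch, showing the tree it points to and its parent commit. A sketch of what that output looks like (the hashes, identity, and message below are purely illustrative):\n\n```sh\n> git cat-file -p HEAD\ntree 9ef2bbbf2f0a0a04974b03e648a597a5a1062ef1\nparent 83b6b1f83c1ea164b9a6ee7a1ba98692a2e7dc41\nauthor Jane Doe \u003Cjane@example.com> 1662422400 +0000\ncommitter Jane Doe \u003Cjane@example.com> 1662422400 +0000\n\nAdd the web-frontend build script\n```\n\n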
A deep Git history therefore means a lot of Git data.\n\n## Speeding up your Git workflow\n\nHere are some features to help speed up your Git workflow.\n\n### Sparse checkout\n\n[git sparse checkout](https://git-scm.com/docs/git-sparse-checkout) reduces the\nnumber of files you check out to a subset of the repository. (NOTE: This feature\nin Git is still marked experimental.) This is especially useful in the case of\n[many sub-projects in a repository](#multiple-sub-projects).\n\nTaking our [example](#multiple-sub-projects) of a monorepo with multiple\nsub-projects, let's say that as a front-end web developer I only need to make\nchanges to `web-frontend/`.\n\n```sh\n> git clone --no-checkout https://my-favorite-git-hosting-service.com/awesome-app.git\n> cd awesome-app\n> git sparse-checkout set web-frontend\n> git checkout\nYour branch is up to date with 'origin/master'.\n> ls\nweb-frontend README.md\n```\n\nOr, if you've already checked out a worktree, sparse checkout can be used to remove\nfiles from the worktree.\n\n```sh\n> git clone https://my-favorite-git-hosting-service.com/awesome-app.git\n> cd awesome-app\n> ls\nbackend web-frontend app-ios app-android README.md\n> git sparse-checkout set web-frontend\nUpdating files: 100% (103452/103452), done.\n> ls\nweb-frontend README.md\n```\n\nSparse checkout will only include the directories indicated, plus all files\ndirectly under the root repository directory.\n\nThis way, we only check out the directories that we need, saving both local disk space\nand time, since each time `git pull` runs, only files that are checked out\nneed to be updated.\n\nMore information can be found in the [docs](https://git-scm.com/docs/git-sparse-checkout)\nfor sparse checkout.\n\n### Partial clone\n\n[git partial clone](https://docs.gitlab.com/ee/topics/git/partial_clone.html) has a similar goal to sparse checkout in reducing the number\nof files in your local Git repository. It provides the option to filter out\ncertain types of files when cloning.\n\nPartial clone is used by passing the `--filter` option to `git clone`.\n\n```sh\ngit clone --filter=blob:limit=10m https://my-favorite-git-hosting-service.com/awesome-app.git\n```\n\nThis will exclude any files over 10 megabytes from being copied to the local\nrepository. A full list of supported filters is included in the\n[docs for git-rev-list](https://git-scm.com/docs/git-rev-list#Documentation/git-rev-list.txt",[702,727,726],{"slug":2391,"featured":6,"template":678},"speed-up-your-monorepo-workflow-in-git","content:en-us:blog:speed-up-your-monorepo-workflow-in-git.yml","Speed Up Your Monorepo Workflow In Git","en-us/blog/speed-up-your-monorepo-workflow-in-git.yml","en-us/blog/speed-up-your-monorepo-workflow-in-git",{"_path":2397,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2398,"content":2404,"config":2410,"_id":2412,"_type":16,"title":2413,"_source":17,"_file":2414,"_stem":2415,"_extension":20},"/en-us/blog/whiteboarding-remote-work-superpower",{"title":2399,"description":2400,"ogTitle":2399,"ogDescription":2400,"noIndex":6,"ogImage":2401,"ogUrl":2402,"ogSiteName":692,"ogType":693,"canonicalUrls":2402,"schema":2403},"Virtual whiteboarding is a remote work super power","Want to master a collective understanding of technical explanations remotely? 
Learn how to use virtual whiteboards to their maximum capabilities in this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682431/Blog/Hero%20Images/kvalifik-5Q07sS54D0Q-unsplash.jpg","https://about.gitlab.com/blog/whiteboarding-remote-work-superpower","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Virtual whiteboarding is a remote work super power\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-09-01\",\n      }",{"title":2399,"description":2400,"authors":2405,"heroImage":2401,"date":2406,"body":2407,"category":14,"tags":2408},[1701],"2022-09-01","\n\nAt one point in my career I had a solo business in technology training. During this time, I went through a transition from live, in-person classes to live-delivered virtual classes. One of the things that I dearly missed in virtual delivery was unpacking explanations through whiteboarding. The difference in the speed and completeness of achieving common understanding across the group was very evident from the nature of the questions and discussions that occurred immediately afterward. \n\nAt that time, it was difficult to find solutions that enabled me to do this as fluidly as an in-person classroom experience. I persisted and came up with an elaborate solution involving software and hardware – but the result, a fluid whiteboarding experience for both presenter and participants, was preserved.\n\n## Explaining and collaborating through drawings\n\n“[The Back of the Napkin: Solving Problems and Selling Ideas with Pictures](https://www.penguinrandomhouse.com/books/300247/the-back-of-the-napkin-expanded-edition-by-dan-roam/)” is a great book on the topic of leveraging drawing in business meetings. The title contains two main themes. “Selling Ideas” is about explaining something you already understand in a highly effective way that creates shared understanding. “Solving Problems” is a very different mode, that of collaborating to create a new visual model that documents the structure of a problem or envisions a new solution. While there are variations on these two themes, these appear to be the two most fundamental modes of using drawing in a group context.\n\n## The importance of progressive disclosure in understanding technical explanations \n\nTechnical explanation is challenging on its own - but the situation is made much worse by presenting complex visuals fully formed. Using a whiteboard for the same explanation leverages the progressive disclosure element of storytelling and overlays it on a technical visualization. This fundamentally enhances understanding because, when a complex visual appears completely formed, the visual cortex hijacks all neurological attention resources (including listening) as it attempts to make sense of the scene. Progressive disclosure allows the minds of listeners to focus on the verbal explanation because only the component being described is visible. As the narrator reveals the next chunk by drawing while explaining its relationship to the last chunk, the audience naturally shifts their full attention.\n\nYou could think of this effect as the same one a cartoon strip uses to create a sense of progressive disclosure. By simply framing the scenes, our mind automatically focuses on one frame at a time in the intended sequence. 
The difference with technical explanations is that the final view is extremely informative without frames - it paints a global picture of the sum of the parts.\n\nPart of the magic of infographics is that they also enforce progressive disclosure on their subject matter by purposely creating a visual so long that it must be scrolled. They are often organized around a natural or contrived timeline through which the disclosure of the component parts progresses, and they often create frames through visual effects such as lines, shapes, and whitespace.\n\nTechnical visualizations frequently have the characteristic that a big-picture visual is very valuable for complete understanding. Progressive disclosure resolves the contradictory requirements for both a “parts-level understanding” and a “big-picture understanding” of a technical design visualization. This is accomplished by layering up the big picture through many bite-sized explanations - exactly like how the mind's eye creates the world of a story when it is narrated in a sequence of small parts.\n\nWhiteboarding, by nature, can only be done as progressive disclosure and, in doing so, it transforms technical explanation into much more digestible and memorable frames for the audience to consume.\n\n## Maintaining a common understanding of the composite vision\n\nIn order to have the best chance to foster a \"group creative flow,\" everyone who is collaborating needs to maintain a common understanding of the composite vision as rapidly emerging ideas and insights are iteratively worked in. Whiteboarding fulfills this need in a way that is not distracting to the effort because a visualization of the vision is maintained in real-time during collaboration.\n\nFrequently, group insights compound on each other as ideas are expanded by building on an idea expressed by someone else in the group. Whiteboarding provides a real-time composite visualization, which accelerates the arrival of new and valuable insights. Drawing frequently enables collaborators to draw things they can't find the words for at the moment. If the conditions are right, there is the potential for a snowball effect of synergistic ideas being incorporated into the composite whole.\n\nThis is why the mechanism for holding the common understanding needs to be fluid and non-distracting.\n\n## Solutions architecture requires both technical explanation and collaboration\n\nIn my job as a Solutions Architect, it turns out that explanation of technical visuals and collaboration in creating technical designs are critical to making helpful contributions to colleagues, customers, and partners.\n\nIt is truly amazing how much more quickly a group of people can get on the same page and innovate when whiteboarding is available.\n\nWhen there are language barriers, whiteboarding takes on even higher value, as visuals are not dependent on language and can help store the real-time common understanding between different language speakers. Multiple times, I’ve found that speakers who are working through a translator get excited and are emboldened to start talking in English (my native tongue). 
Generally, the technical terms are recognized in any language, and adding them to the diagram fuels more mutual understanding.\n\nWhen I came to GitLab as a Solutions Architect, I once again began to experiment with ways to make fluid whiteboarding easy to do in any meeting.\n\n## Better than real whiteboards for in-person meetings\n\nOccasionally, when working hard for one objective, you accidentally achieve some objectives you didn’t even know you should or could have. This is known as serendipity.\n\nThis is what has happened in my pursuit of very fluid virtual whiteboarding. I found virtual whiteboarding handles a lot of the logistics and practical considerations of whiteboarding for in-person meetings, such as:\n\n* Verifying availability of whiteboards at a meeting venue\n* Simultaneous whiteboard and computer projection visibility (I’ve been in rooms where you had to stop projecting to use the whiteboard)\n* Not even needing a projector if you do an in-person virtual meeting to share your screen\n* Marker management - smell, mess, dried-out markers\n* The inability to preserve every whiteboard that you draw due to needing to erase for the next one\n* The inability to electronically store or share the visuals\n\n## The challenges of hardware and software selection\n\nI wish I could honestly say the process of putting together a fluid virtual whiteboard setup is now easy, but I have not found that to be the case.\n\n### Mental flow requires fluid technology\n\nWhether you are explaining or collaborating, mental flow is critical. If the flow is interrupted by things that should be transparent, it is frustrating for everyone, and the audience quickly loses attention. It interrupts the thoughts of both the whiteboarder and the participants.\n\nThink of the times that someone starts whiteboarding and the one and only marker goes dry and they have to hunt for one. Virtual whiteboarding can actually make the problem of interrupting flow much worse. This is because if there are delays in the hardware or software, the shape of what you are drawing gets incorrectly “smoothed”. \n\nA lack of fluidity will generally make your shapes challenging to draw, so you slow down to allow the system to recognize your strokes, and, well, it’s not fluid anymore - it’s distracting and effortful. And the rendering of shapes isn’t the worst of it: when trying to add text to label diagram parts, a lack of fluidity causes the smaller strokes of text to be unrecognizable. A lack of fluid drawing completely kills the presenter’s desire to draw and the audience’s desire to listen to the pictures.\n\n### Fluidity of the drawing activity\n\nIn trying to devise a cost-effective, yet fluid, setup, I’ve tried all the shortcuts - such as using capacitive touchscreens with a stylus and using web apps as the primary whiteboarding software. Both of these are deal breakers for me because, after trying many instances of these two options, they just never work out to have sufficient fluidity. 
\n\nSo here are the constraints I ended up adopting to make drawing itself very fluid:\n\n* **Use an iOS or Android mobile platform tablet** - as it has the following advantages:\n    * Native mobile apps are much, much more fluid than web apps.\n    * Many more native software options than there are for laptops.\n    * More modular and cheaper hardware in the long run than attempting to gain these capabilities in a laptop or desktop.\n* **Must support active pen technology** - capacitive touchscreens, even with a stylus, don’t cut it. When working rapidly, the smoothing algorithms aren’t very smooth - this makes drawing shapes difficult, but more importantly it makes writing words especially difficult.\n* **Having a stylus that is the correct length and thickness** is important for fluid writing and drawing. The stylus that comes with tablets is frequently not the conventional length or thickness of real pens or pencils.\n\n### Fluidity of integrating the act of drawing into virtual meetings\n\nUsing drawing needs to be easy for the meeting host and for the participants.\n\n* For easy virtual sharing, it helps if the native app also has a collaborative web app that updates quickly as it avoids the complications of joining the tablet to the meeting and sharing from there. \n* This enables other meeting participants to whiteboard on the same virtual whiteboard without specialized hardware.\n* Some systems allow guests to join and whiteboard without having to setup an account.\n\n### Fluidity of availability of whiteboarding across teams\n\nThere are multiple elements of what will ensure fluid whiteboarding is available to everyone for collaborations. A primary one is cost, followed by local device availability of active pen tablet options in international markets. Thankfully, the applications covered later support both iOS and Android which helps in finding affordable and locally available options.\n\n* Cost\n* Mobile apps tend to be more likely to be available for both major mobile operating systems compared to a native desktop-only solution being available on multiple desktop platforms\n* Mobile platform flexibility and global cost and availability compared to pursuing laptops with the same capability\n\n## A working example setup\n\n![](https://about.gitlab.com/images/blogimages/virtualwhiteboarding/whiteboarding-setup-samsung8.jpg)\n\n### Tablet\n\n* My first tablet was a [Samsung Galaxy Tab A 8.0 with Spen (SM-P200)](https://www.amazon.com/gp/product/B07TS2N27S/) that cost me USD $235. It is actually an international edition as the US market does not seem to offer an active pen tablet in the 8” format. In this case, the stylus fits inside the tablet so is really not appropriate for fluid drawing and writing. \n* Since my first purchase, Samsung has come out with a less expensive line of large tablets with active pen technology, so I now also have the [Samsung Galaxy Tab S6 Lite 10.4 inch (SM-P610NZBAXAR)](https://www.amazon.com/SAMSUNG-Android-Included-Speakers-SM-P610NZBAXAR/dp/B086Z3S3MY/), which I obtained for USD $250. While the stylus in this unit is considered more full-sized, the thickness, feel, forefinger button and length all cause me to reach for my after-market stylus for the most fluid experience.\n\n### Stylus\n\n* For a stylus, I use the [STAEDTLER 180 Noris digital classic EMR Stylus](https://www.amazon.com/STAEDTLER-22-1-digital-compatibility-purchase/dp/B0728HBD7F). I find the length, weight, and lack of buttons all helpful in handling drawing and writing smoothly. 
The color also makes it a little harder to lose. The downsides are that very few tablet cases can accommodate it, and that it looks so much like a pencil that someone in your household may accidentally toss it in the regular pen jar. Always be sure to verify stylus compatibility with your device’s active pen technology.\n\n### Settings\n\nI almost returned the 10” tablet due to a behavior that a couple of settings fixed. Android tablets with no physical buttons put a navigation bar on the bottom part of the screen. I found that the Home button kept picking up my palm, which would close the whiteboarding app. Also, when any of these buttons are activated by the stylus, it is equally disruptive to the flow. For the Navigation Bar settings, I set “Navigation type” to “Swipe gestures” and enable “Block gestures with S Pen” (Android 12, Samsung OneUI v4.1).\n\n### Collaborative whiteboard options\n\nThere are multiple collaborative whiteboard options - some are completely free, while the paid options carry enough value-add features to be worth considering for heavy users.\n\n| | [Liveboard Online](https://www.liveboard.online) | [Google Jamboard](https://support.google.com/jamboard/answer/7424836?hl=en) | [Miro](https://miro.com) |\n|---|---|---|---|\n| Long Term Usage of Free Tier | Yes | Yes | No - Only 3 Boards |\n| Native Mobile Apps | Yes | Yes | Yes |\n| Native Desktop Apps | No | No | Yes |\n| Web App (for screen sharing) | Yes | Yes | Yes |\n| Fluidity Extras | | | Multiple pens on deck |\n| Pages / Canvas | Pages | Pages | Endless Canvas |\n| Presentation Mode | No | No | Yes |\n| Direct Import From Google Slides | No | Yes | No |\n| Shape Recognition from hand drawing | Light Duty | Light Duty | Awesome |\n| Reusability of Drawing as Formal Technical Diagrams | No | No | Yes |\n| Viewers Synchronized To Drawing Location | Yes | Yes | Manual |\n\nSee here for
more [GitLab remote work whiteboarding information](/company/culture/all-remote/collaboration-and-whiteboarding/).\n\nPhoto by [Kvalifik](https://unsplash.com/@kvalifik?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/whiteboard?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[750,1347,2409],"remote work",{"slug":2411,"featured":6,"template":678},"whiteboarding-remote-work-superpower","content:en-us:blog:whiteboarding-remote-work-superpower.yml","Whiteboarding Remote Work Superpower","en-us/blog/whiteboarding-remote-work-superpower.yml","en-us/blog/whiteboarding-remote-work-superpower",{"_path":2417,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2418,"content":2424,"config":2430,"_id":2432,"_type":16,"title":2433,"_source":17,"_file":2434,"_stem":2435,"_extension":20},"/en-us/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier",{"title":2419,"description":2420,"ogTitle":2419,"ogDescription":2420,"noIndex":6,"ogImage":2421,"ogUrl":2422,"ogSiteName":692,"ogType":693,"canonicalUrls":2422,"schema":2423},"Postman integration with GitLab makes API workflows easier","Learn how to use the git integration to link APIs in Postman to GitLab cloud repos.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671434/Blog/Hero%20Images/introducing-continuous-workflows.jpg","https://about.gitlab.com/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Postman integration with GitLab makes API workflows easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andy Rogers\"}],\n        \"datePublished\": \"2022-08-24\",\n      }",{"title":2419,"description":2420,"authors":2425,"heroImage":2421,"date":2427,"body":2428,"category":14,"tags":2429},[2426],"Andy Rogers","2022-08-24","\n\nAPIs are more than just an interface. From a development lifecycle perspective, an API includes source code, definition files, tests, performance measurements, documentation, security audits, deployments, and feedback from API consumers. All of these elements are required for a successful API implementation. So, in partnership with GitLab, Postman created a git integration that allows users to link APIs in Postman to their GitLab cloud repos (on-prem versions of GitLab are only supported on [Postman Enterprise](https://www.postman.com/pricing/)).\n\nThe [Postman API Platform](https://blog.postman.com/new-postman-api-platform-redefining-api-management-for-api-first-world/) is designed to help teams collaborate seamlessly by providing tools for the entire API lifecycle. We understand that a fundamental part of the API lifecycle includes [developer workflows](https://blog.postman.com/the-reimagined-api-first-workflow-for-developers/) centered around code and source control.\n\n![illustration](https://about.gitlab.com/images/blogimages/postman1.png){: .shadow}\n\n## 4 key benefits for better collaboration\n\nThe launch of this integration earlier in the year provides four key benefits that empower teams to work faster and better together:\n\n**1.** It introduces the concept of version control into Postman. Users are now able to manage and sync branches, releases, versions, and tags for their APIs in GitLab and Postman. 
\n\n\n![screenshot of drop-down menu](https://about.gitlab.com/images/blogimages/postman2.png){: .shadow}\n\n\n**2.** Elements created in Postman can be pushed to a user’s GitLab repository, where the schema and collections can coexist alongside the source code. Likewise, branching workflows that your team might already be using can now be followed in Postman; external changes to code and API definitions are reviewable and can be merged back to Postman.\n\n\n![screenshot of branch info](https://about.gitlab.com/images/blogimages/postman3.png){: .shadow}\n\n**3.** This integration enables developers to think about API elements as the API itself, instead of treating code, API definitions, documentation, collections, tests, monitors, etc. as independent entities. All of these constitute the API. Moreover, this allows a higher-level view of the entire API, rather than just the source code — a critical requirement for any organization who wants to build a structured and robust API program.\n\n\n![screenshot of API info](https://about.gitlab.com/images/blogimages/postman4.png){: .shadow}\n\n\n**4.** The Postman-GitLab integration greatly minimizes the likelihood that downstream teams and API consumers will interact with outdated (or even deprecated) APIs or API elements. Users don’t have to spend time deciphering what API, collection, or documentation is current, since they can see what version they are working with all the way back to the code. In Postman, users also have direct access to real-time collaborative tools such as commenting and forking/merging to maintain synchronization between downstream API consumption and the source of truth.\n\n![illustration](https://about.gitlab.com/images/blogimages/postman5.png){: .shadow}\n\n## An integration for the API-first world\n\nOur partnership with GitLab supports our commitment to building Postman as the platform for the [API-first world](https://api-first-world.com/). With integrations like this, [API-first companies](https://blog.postman.com/what-is-an-api-first-company/) are now more productive, can deliver higher-quality products, and are able to build stronger ecosystems of developers, partners, and consumers. 
\n\nTo get started with the GitLab integration, check out [our guide](https://blog.postman.com/the-reimagined-api-first-workflow-for-developers/) and our how-to video for GitLab integration config:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/BL8DFOPncMc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_Andy Rogers is product manager at Postman._\n\n\n",[894,727,232],{"slug":2431,"featured":6,"template":678},"postman-integration-with-gitlab-makes-your-api-workflows-easier","content:en-us:blog:postman-integration-with-gitlab-makes-your-api-workflows-easier.yml","Postman Integration With Gitlab Makes Your Api Workflows Easier","en-us/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier.yml","en-us/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier",{"_path":2437,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2438,"content":2443,"config":2448,"_id":2450,"_type":16,"title":2451,"_source":17,"_file":2452,"_stem":2453,"_extension":20},"/en-us/blog/why-we-have-implemented-our-own-sshd-solution-on-gitlab-sass",{"title":2439,"description":2440,"ogTitle":2439,"ogDescription":2440,"noIndex":6,"ogImage":1579,"ogUrl":2441,"ogSiteName":692,"ogType":693,"canonicalUrls":2441,"schema":2442},"Why we implemented our own SSHD solution","Until recently we used OpenSSH Server to handle SSH connections to provide SSH-related features, but we ultimately decided to implement our own SSHD solution. Learn more!","https://about.gitlab.com/blog/why-we-have-implemented-our-own-sshd-solution-on-gitlab-sass","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we implemented our own SSHD solution\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Igor Drozdov\"}],\n        \"datePublished\": \"2022-08-17\",\n      }",{"title":2439,"description":2440,"authors":2444,"heroImage":1579,"date":2445,"body":2446,"category":14,"tags":2447},[1181],"2022-08-17","\n\nThe story of why we moved to our own SSHD is an interesting one. GitLab provides [a number of features](https://gitlab.com/gitlab-org/gitlab-shell/-/blob/main/doc/features.md) that execute via an SSH connection. The most popular one is Git-over-SSH, which enables communicating with a Git server via SSH. Historically, we implemented the features through a combination of OpenSSH Server and a separate component, a binary called GitLab Shell. GitLab Shell processes every connection established by OpenSSH Server to communicate data back and forth between the SSH client and the Git server. The solution was battle-tested, and relied on a trusted component such as OpenSSH. Here's why we decided to implement our own SSHD.\n\n## Community contribution\n\n[Everyone can contribute at GitLab](/company/mission/#mission)! A [community contribution](https://gitlab.com/gitlab-org/gitlab-shell/-/merge_requests/394) from [@lorenz](https://gitlab.com/lorenz), `gitlab-sshd`, was suggested as a lightweight alternative to our existing setup. A self-contained binary with minimal external dependencies would be beneficial for containerized deployments. 
A GitLab-supported replacement also opened up new opportunities:\n\n- PROXY protocol support to enable [Group IP address restriction via SSH](https://gitlab.com/gitlab-org/gitlab/-/issues/271673): Group IP address restriction didn't work for the GitLab Shell + OpenSSH solution, because OpenSSH didn't provide PROXY protocol support. As a result, GitLab Shell couldn't see users' real IP addresses. We had to either use a patched version of OpenSSH, or implement our own solution to support the PROXY protocol. With our own solution, we could enable the PROXY protocol, receive the real IP addresses of users, and provide Group IP address restriction functionality.\n- Kubernetes compatibility with graceful shutdown, liveness, and readiness probes: With OpenSSH, we had no control over established connections. When Kubernetes pods were rotated, all the ongoing connections were immediately dropped, which could interrupt long-running `git clone` operations. With a dedicated server, the connections become manageable and can be shut down gracefully: the server listens for an interrupting signal and, when the signal is received, stops accepting new connections and waits for a grace period before shutting down completely. This grace period gives ongoing connections an opportunity to finish (a minimal sketch of this pattern follows the architecture diagrams below).\n- Prometheus metrics and profiling became possible: In our previous approach, GitLab Shell was just a binary that created a process that lived as long as the SSH connection lived. This approach didn't provide a straightforward way to run a metrics server. With a dedicated server, we can now collect metrics and implement detailed logging for monitoring and debugging purposes.\n- Resource usage is significantly lower in some scenarios: Lightweight goroutines are cheaper than spawning a separate process for every SSH connection. Spawning separate processes performed better in basic cases, but our real-world scenarios didn't demonstrate a drastic performance improvement. However, with `gitlab-sshd` it became possible to introduce a Go profiler to surface performance problems, which was a significant improvement from an operating perspective.\n- Reduced attack surface by using only a restricted set of SSH implementation features: With the previous approach, we allowed establishing an SSH connection to the OpenSSH server, but restricted it to a specific feature set. With `gitlab-sshd`, any unpredictable call to an OpenSSH feature that we don't support is no longer possible, dramatically reducing the attack surface.\n- A simplified architecture that is easier to understand:\n\n### The previous architecture\n\n![Old architecture](https://about.gitlab.com/images/blogimages/create-source-code/gitlab-sshd/old-architecture.png)\n\n### Our current architecture\n\n![New architecture](https://about.gitlab.com/images/blogimages/create-source-code/gitlab-sshd/new-architecture.png)
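\n\nTo make that graceful shutdown concrete, here is a minimal, hypothetical sketch of the pattern in Go. It is not the actual `gitlab-sshd` code - just a plain TCP listener (standing in for the SSH server) that stops accepting connections on SIGTERM and then waits a bounded grace period for in-flight connections to finish:\n\n```go\npackage main\n\nimport (\n    \"log\"\n    \"net\"\n    \"os\"\n    \"os/signal\"\n    \"sync\"\n    \"syscall\"\n    \"time\"\n)\n\n// handle stands in for the real work: an SSH handshake plus a Git command.\nfunc handle(c net.Conn) {\n    time.Sleep(time.Second)\n}\n\nfunc main() {\n    ln, err := net.Listen(\"tcp\", \":2222\")\n    if err != nil {\n        log.Fatal(err)\n    }\n\n    sigCh := make(chan os.Signal, 1)\n    signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)\n    go func() {\n        \u003C-sigCh   // shutdown requested, e.g. a Kubernetes pod rotation\n        ln.Close() // stop accepting new connections\n    }()\n\n    var wg sync.WaitGroup\n    for {\n        conn, err := ln.Accept()\n        if err != nil {\n            break // listener closed: begin the grace period\n        }\n        wg.Add(1)\n        go func(c net.Conn) {\n            defer wg.Done()\n            defer c.Close()\n            handle(c)\n        }(conn)\n    }\n\n    // Give in-flight connections a bounded window to finish.\n    done := make(chan struct{})\n    go func() { wg.Wait(); close(done) }()\n    select {\n    case \u003C-done:\n    case \u003C-time.After(30 * time.Second):\n    }\n    log.Println(\"shut down gracefully\")\n}\n```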
\n\n## Risks and challenges\n\nHowever, changing a critical component that is broadly used, and is responsible for security, carries tremendous risks. We experienced both challenges and risks:\n\n- **Security perspective**: An SSH server is a critical component; it establishes a secure connection between a user and a server, and allows them to communicate privately. Any failures could open security holes, permitting anything from authentication bypass to remote code execution. To mitigate the risks, we performed multiple rounds of security reviews: before development (by examining the components used), during development, and after working versions of the code were deployed to staging environments.\n- **Operational perspective**: The component is broadly used. Any failures would affect a vast number of users. To mitigate the risks, we rolled the changes out gradually to 1%, 5%, etc. of the traffic, and rolled back if a problem was encountered. After 8 attempts, we had the server successfully running for 100% of traffic!\n\n## Problems we encountered\n\nA component with a scope this broad could have a wide range of problems. We encountered, and resolved, the following problems:\n\n- **Incompatibility with other in-progress features:** Our first `gitlab-sshd` deployment consumed huge amounts of memory. It interacted negatively with another feature under development at the same time. We must always keep the interaction with other components in mind when introducing a general component.\n- **Limited feature set in the `golang.org/x/crypto` library:** This library establishes SSH connections, and has limited support for the algorithms and features available in OpenSSH. We created [our own fork](https://gitlab.com/gitlab-org/golang-crypto) to provide the missing features:\n  - The OpenSSH client deprecated SHA-1 based signatures in host certificates in version 8.2 for security reasons; backward compatibility is provided by the `server-sig-algs` extension, but [`golang.org/x/crypto`](https://pkg.go.dev/golang.org/x/crypto) didn't support it. We [started supporting](https://gitlab.com/gitlab-org/golang-crypto/-/merge_requests/1) this extension.\n  - Some MACs and key exchange algorithms are unsupported: `hmac-sha2-512` and `hmac-sha2-256` are the most noticeable. We [started supporting](https://gitlab.com/gitlab-org/golang-crypto/-/merge_requests/4) these algorithms (see the sketch at the end of this section).\n  - Buggy SSH clients, such as the `gpg-agent v2.2.4` and `OpenSSH v7.6` shipped in `Ubuntu 18.04`, might send `ssh-rsa-512` as the public key algorithm but actually include a `rsa-sha` signature. We had to [relax the RSA signature check](https://gitlab.com/gitlab-org/golang-crypto/-/merge_requests/9) to resolve this issue.\n- **Re-implementing options available in OpenSSH:** Familiar OpenSSH options like `LoginGraceTime` and `ClientAliveInterval` were unavailable, so we implemented multiple alternatives to preserve the features we needed.
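\n\nOnce the missing algorithms exist in the library, a server can also opt in to them explicitly. Here is a small, hypothetical Go sketch of that idea, assuming a recent `golang.org/x/crypto/ssh` (or the GitLab fork); the algorithm list and host key path are illustrative, not GitLab's production configuration:\n\n```go\npackage main\n\nimport (\n    \"log\"\n    \"os\"\n\n    \"golang.org/x/crypto/ssh\"\n)\n\nfunc main() {\n    cfg := &ssh.ServerConfig{\n        // Illustration only: a real server would configure\n        // PublicKeyCallback instead of disabling authentication.\n        NoClientAuth: true,\n    }\n    // Explicitly pin the MACs this server will negotiate. The ssh.Config\n    // embedded in ServerConfig also exposes Ciphers and KeyExchanges.\n    cfg.MACs = []string{\n        \"hmac-sha2-256-etm@openssh.com\",\n        \"hmac-sha2-512\",\n        \"hmac-sha2-256\",\n    }\n\n    keyBytes, err := os.ReadFile(\"/etc/ssh/ssh_host_ed25519_key\")\n    if err != nil {\n        log.Fatal(err)\n    }\n    signer, err := ssh.ParsePrivateKey(keyBytes)\n    if err != nil {\n        log.Fatal(err)\n    }\n    cfg.AddHostKey(signer)\n    // From here: net.Listen, then ssh.NewServerConn(conn, cfg) per connection.\n}\n```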
\n\n## Lessons learned\n\nUnfortunately, issues became visible in the production environment, thanks both to the load and to the variety of possible OpenSSH configurations. Even though we caught some bugs on our staging environments, predicting all types of problems was almost impossible. However, these actions helped us resolve the issues:\n\n- **Incremental rollouts:** The rollout plan proved to be extremely effective. It enabled us to iterate without disrupting service for most users.\n- **Seeking multiple perspectives:** We sought a diverse set of opinions from a variety of groups, such as Security, Infrastructure, Quality, and Scalability. This helped us to evaluate the project from multiple perspectives, mitigate the risks, and prevent the majority of issues from happening.\n\n",[1286,1307],{"slug":2449,"featured":6,"template":678},"why-we-have-implemented-our-own-sshd-solution-on-gitlab-sass","content:en-us:blog:why-we-have-implemented-our-own-sshd-solution-on-gitlab-sass.yml","Why We Have Implemented Our Own Sshd Solution On Gitlab Sass","en-us/blog/why-we-have-implemented-our-own-sshd-solution-on-gitlab-sass.yml","en-us/blog/why-we-have-implemented-our-own-sshd-solution-on-gitlab-sass",{"_path":2455,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2456,"content":2461,"config":2467,"_id":2469,"_type":16,"title":2470,"_source":17,"_file":2471,"_stem":2472,"_extension":20},"/en-us/blog/upgrading-database-os",{"title":2457,"description":2458,"ogTitle":2457,"ogDescription":2458,"noIndex":6,"ogImage":1579,"ogUrl":2459,"ogSiteName":692,"ogType":693,"canonicalUrls":2459,"schema":2460},"We are upgrading the operating system on our Postgres database clusters","Learn when these upgrades will happen and how they will help boost performance and reliability on GitLab.com.","https://about.gitlab.com/blog/upgrading-database-os","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We are upgrading the operating system on our Postgres database clusters\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David Smith\"}],\n        \"datePublished\": \"2022-08-12\",\n      }",{"title":2457,"description":2458,"authors":2462,"heroImage":1579,"date":2464,"body":2465,"category":14,"tags":2466},[2463],"David Smith","2022-08-12","\nContinuing on the theme of [improving the performance and reliability of GitLab.com](/blog/path-to-decomposing-gitlab-database-part1/), we have another step we will be taking for our clusters of Postgres database nodes. These nodes have been running on Ubuntu 16.04 with extended security maintenance patches, and it is now time to get them to a more current version. Usually, this kind of upgrade is a behind-the-scenes event, but there is an underlying technicality that will require us to take a maintenance window to do the upgrade (more on that [below](#the-challenge)).\n\nWe have been preparing for and [practicing this upgrade](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7577) and are now ready to schedule the window to do this work for GitLab.com.\n\n## When will the OS upgrade take place and what does this mean for users of GitLab.com?\n\nThis change is planned to take place on 2022-09-03 (Saturday) between 11:00 UTC and 14:00 UTC. The implementation of this change is anticipated to include a **service downtime of up to 180 minutes** (see [reference issue](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7543)). During this time you will experience complete service disruption of GitLab.com.\n\nWe are taking downtime to ensure that the application works as expected following the OS upgrade and to minimize the risk of any data integrity issues.\n\n> Join us at [GitLab Commit 2022](/events/commit/) and connect with the ideas, technologies, and people that are driving DevOps and digital transformation.\n\n## Background\n\nGitLab.com's [database architecture](/handbook/engineering/infrastructure/production/architecture/#database-architecture) uses two Patroni/Postgres database clusters: Main and CI. We recently did functional decomposition, and the CI cluster now stores the data generated by GitLab CI features.
Each Patroni cluster has a primary and multiple read-only replicas. For each of the Patroni clusters, the Postgres database size is ~18 TB running on Ubuntu 16.04. During the scheduled change window, we will be switching over to our newly built Ubuntu 20.04 clusters.\n\n## The challenge\n\nUbuntu 18.10 introduced an updated version of glibc (2.28), which includes a [major update to locale data](https://wiki.postgresql.org/wiki/Locale_data_changes) and causes Postgres indexes created with earlier versions of glibc to be corrupted. Because we are upgrading to Ubuntu 20.04, our indexes are affected by this. Therefore, during the downtime window scheduled for this work, we need to detect potentially corrupt indexes and have them reindexed before we enable production traffic again. We currently have the following types and approximate numbers of indexes:\n\n```\n Index Type | # of Indexes\n------------+--------------\n btree      |         4079\n gin        |          101\n gist       |            3\n hash       |            1\n```\n\nAs you can appreciate, given the sheer number (and size) of these indexes, it would take far too long to reindex every single index during the scheduled downtime window, so we need to streamline the process.\n\n## Options to upgrade to Ubuntu 20.04 safely\n\nThere are a number of ways to deal with the problem of potentially corrupt indexes:\n\na. Reindex **all** indexes during the scheduled downtime window\n\nb. Transport data to the target 20.04 clusters in a logical (not binary) way, including:\n\n  - Backups/upgrades using pg_dump\n  - Logical replication\n\nc. Use streaming replication from 16.04 to 20.04 and, during the downtime window, break replication and promote the 20.04 clusters, followed by reindexing of potentially corrupt indexes\n\nIt might be feasible for a small to medium-size Postgres implementation to use options a or b; however, at the GitLab.com scale, they would require a much larger downtime window, and our aim is to reduce the impact on our customers as much as possible.\n\n## High-level approach for the OS upgrade\n\nTo perform an OS upgrade on our Patroni clusters, we use Postgres streaming replication to replicate data from our current Ubuntu 16.04 clusters to the brand new Ubuntu 20.04 standby Patroni clusters. During the scheduled downtime window, we will stop all traffic to the current 16.04 clusters, promote the 20.04 clusters by making them primary, and demote the Ubuntu 16.04 clusters by reconfiguring them to act as standby clusters replicating from the new 20.04 primaries. We will then reindex all the identified potentially corrupt indexes, and update DNS to point the application to the new 20.04 Patroni clusters before opening traffic to the public.\n\n## Identifying potentially corrupt indexes and our approach to handling the reindexing for different types of indexes\n\n### B-Tree\n\nWe use the `bt_index_parent_check` [amcheck function](https://www.postgresql.org/docs/12/amcheck.html) to identify potentially corrupt indexes, and we will reindex them during the downtime window.
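\n\nThe check itself is one SQL function call per index. The Go sketch below is hypothetical - it is not our production amcheck script - but it shows the overall shape: enumerate the B-Tree indexes, then fan the `bt_index_parent_check` calls out over a pool of workers. It assumes the `amcheck` extension is already installed, and the driver choice and connection string are made up:\n\n```go\npackage main\n\nimport (\n    \"database/sql\"\n    \"log\"\n    \"sync\"\n\n    _ \"github.com/lib/pq\" // illustrative Postgres driver choice\n)\n\nfunc main() {\n    db, err := sql.Open(\"postgres\", \"dbname=gitlabhq_production sslmode=disable\")\n    if err != nil {\n        log.Fatal(err)\n    }\n    defer db.Close()\n\n    // Enumerate all B-Tree indexes; amcheck only covers B-Tree today.\n    rows, err := db.Query(`SELECT c.oid::regclass::text FROM pg_class c JOIN pg_am a ON a.oid = c.relam WHERE c.relkind = 'i' AND a.amname = 'btree'`)\n    if err != nil {\n        log.Fatal(err)\n    }\n    var indexes []string\n    for rows.Next() {\n        var name string\n        if err := rows.Scan(&name); err != nil {\n            log.Fatal(err)\n        }\n        indexes = append(indexes, name)\n    }\n\n    // One bt_index_parent_check call per index, spread over workers.\n    work := make(chan string)\n    var wg sync.WaitGroup\n    for i := 0; i \u003C 16; i++ { // worker count ~ available CPU cores\n        wg.Add(1)\n        go func() {\n            defer wg.Done()\n            for idx := range work {\n                if _, err := db.Exec(`SELECT bt_index_parent_check($1::regclass, true)`, idx); err != nil {\n                    log.Printf(\"possible corruption in %s: %v\", idx, err)\n                }\n            }\n        }()\n    }\n    for _, idx := range indexes {\n        work \u003C- idx\n    }\n    close(work)\n    wg.Wait()\n}\n```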
\n\n### GiST and Hash\n\nSince we do not have many GiST and Hash indexes, and reindexing them is a relatively quick operation, we will reindex them all during the downtime window.\n\n### GIN\n\nCurrently, the production version of amcheck is limited to detecting potential corruption in B-Tree indexes only. Our GIN indexes are fairly large, and it would require a significant amount of time to reindex them all during the scheduled downtime window, which is not feasible as we cannot have the site unavailable to our customers for that long. We have collaborated closely with our database team to produce a list of business-critical GIN indexes to be reindexed **during** the downtime window; any other GIN indexes will be reindexed immediately after we open up traffic to the public, using the [CONCURRENTLY](https://www.postgresql.org/docs/current/sql-reindex.html#SQL-REINDEX-CONCURRENTLY) option. Using this option means it will take longer to reindex, but it allows normal operations to continue while the indexes are being rebuilt.\n\n## Performance improvements\n\nWe started looking into options to improve the performance of the reindexing (see [reference issue](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15559#note_940517257)). There are a couple of areas where we needed to improve performance.\n\n### Identify potentially corrupt B-Tree indexes quickly\n\nWhen we first started using amcheck to identify potentially corrupt indexes, it was single-threaded, so it took just under five days to run the amcheck script on production data. After a few iterations, our amcheck script now runs a separate background worker process for each index, so we essentially get a performance improvement of about 96 times when we use a 96-CPU-core VM to run amcheck. The performance is limited by the time it takes to run amcheck on the largest index. The script can be customized to skip or include a specific set of tables/indexes, and we can decide the number of parallel worker processes to use based on the number of CPU cores available on the VM we use to run amcheck. Now, with the improved speed, we can run the amcheck script on a copy of production data a day or two before the scheduled OS upgrade downtime window.\n\n### Improve reindexing speed to reduce the downtime\n\nOur initial reindexing test ran sequentially with the default Postgres parameters. We have since tested reindexing with different Postgres parameters and parallelized the reindex process. We are now able to perform our reindexing in less than half the time it used to take.\n\n## Reading material\n\nFor more information, please see the following links:\n\n- [Ubuntu 20.04 Upgrade Epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/637)\n- [Research on the types of indexes and steps to identify corruption](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15384#note_867281334)\n",[676,704,894],{"slug":2468,"featured":6,"template":678},"upgrading-database-os","content:en-us:blog:upgrading-database-os.yml","Upgrading Database Os","en-us/blog/upgrading-database-os.yml","en-us/blog/upgrading-database-os",{"_path":2474,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2475,"content":2481,"config":2487,"_id":2489,"_type":16,"title":2490,"_source":17,"_file":2491,"_stem":2492,"_extension":20},"/en-us/blog/path-to-decomposing-gitlab-database-part1",{"title":2476,"description":2477,"ogTitle":2476,"ogDescription":2477,"noIndex":6,"ogImage":2478,"ogUrl":2479,"ogSiteName":692,"ogType":693,"canonicalUrls":2479,"schema":2480},"Decomposing the GitLab backend database, Part 1: Designing and planning","A technical summary of the yearlong project to decompose GitLab's Postgres database. 
This first part focuses on the initial designing and planning of the project.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/path-to-decomposing-gitlab-database-part1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Decomposing the GitLab backend database, Part 1: Designing and planning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dylan Griffith\"}],\n        \"datePublished\": \"2022-08-04\",\n      }",{"title":2476,"description":2477,"authors":2482,"heroImage":2478,"date":2484,"body":2485,"category":14,"tags":2486},[2483],"Dylan Griffith","2022-08-04","\nRecently we finished [migrating the GitLab.com monolithic Postgres database to two independent databases: `Main` and `CI`](/blog/splitting-database-into-main-and-ci/). After we decided how to split things up, the project took about a year to complete.\n\nThis blog post on decomposing the GitLab backend database is part one in a three-part series. The posts give technical details about many of the challenges we had to\novercome, as well as links to issues, merge requests, epics, and developer-facing documentation.\nOur hope is that you can get as much detail as you want about how we work on complex projects at GitLab.\n\nWe highlight the most interesting details, but anyone undertaking a similar\nproject might learn a lot from seeing all\nthe different trade-offs we evaluated along the way.\n\n- \"Decomposing the GitLab backend database, Part 1\" focuses on the initial design and planning of the project.\n- [Part 2](/blog/path-to-decomposing-gitlab-database-part2/) focuses on the\nexecution of the final migration.\n- [Part 3](/blog/path-to-decomposing-gitlab-database-part3/) highlights some interesting technical challenges we had to solve along the way, as well as some surprises.\n\n## How it began\n\nBack in early 2021, GitLab formed a \"database sharding\" team in an effort to\ndeal with our ever-growing monolithic Postgres database. This database stored\nalmost all the data generated by GitLab.com users, excluding git data and some other\nsmaller things.\n\nAs this database grew over time, it became a common source of\nincidents for GitLab. We knew that eventually we had to move away from a single\nPostgres database. We were already approaching the limits of what we could do\non a single VM with 96 vCPU and continually trying to vertically scale this VM\nwould eventually not be possible. Even if we could vertically scale forever,\nmanaging such a large Postgres database just becomes more and more difficult.\n\nEven though our database architecture has been monolithic for a long time, we already made use of many scaling techniques, including:\n\n- Using Patroni to have a pool of replicas for read-only traffic\n- Using PGBouncer for pooling the vast number of connections across our application fleet\n\n![Database architecture before decomposition](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/phase0.png)\n\nThese approaches only got us so far and ultimately would never fix the scaling\nbottleneck of the number of writes that need to happen, because all writes need to\ngo to the primary database.\n\nThe original objective of the database sharding team was to find a viable way\nto horizontally shard the data in the database. 
We started with exploring\n[sharding by top-level namespace][sharding_by_top_level_namespace_poc_epic]. This approach had some very complicated problems to solve, because the application\nwas never designed to have strict tenancy boundaries around top-level\nnamespaces. We believe that ultimately this will be a good way to split and\nscale the database, but we needed a shorter term solution to our scaling\nproblems.\n\nThis is when we evaluated different ways to extract certain tables into a\nseparate database. This approach is often referred to as \"vertical\npartitioning\" or \"functional decomposition.\" We assumed this extraction would likely\nbe easier, as long as we found a set of tables with loose coupling to the rest\nof the database. We knew it would require us to remove all joins to the rest of the\ntables (more on that later).\n\n## Figuring out where most write activity occurs\n\nWe did [an analysis][analysis_of_decomposition_tables] of:\n\n- Where the bulk of our data was stored\n- The write traffic (since ultimately the number of writes was the thing we were trying to reduce)\n\nWe learned that CI tables (at the time) made up around 40% to 50% of our write traffic. This seemed like a\nperfect candidate, because splitting the database in half (by write traffic) would be\nthe optimal scaling step.\n\nWe analyzed the data by splitting the database the following ways:\n\n| Tables group   | DB size (GB) | DB size (%) | Reads/s   | Reads/s (%) | Writes/s | Writes/s (%) |\n|----------------|--------------|-------------|-----------|-------------|----------|--------------|\n| Webhook logs   | 2964.1       | 22.39%      | 52.5      | 0.00%       | 110.0    | 2.82%        |\n| Merge Requests | 2673.7       | 20.20%      | 126073.4  | 1.31%       | 795.4    | 20.40%       |\n| CI             | 4725.0       | 35.69%      | 1712843.8 | 17.87%      | 1909.2   | 48.98%       |\n| Rest           | 2876.3       | 21.73%      | 7748488.5 | 80.82%      | 1083.6   | 27.80%       |\n\nChoosing to split the CI tables from the database was partly based on instinct.\nWe knew the CI tables (particularly `ci_builds` and\nrelated metadata) were already some of the largest tables in our database. It\nwas also a convenient choice because the CI tables were already prefixed with\n`ci_`. In the end, we realized only three tables were CI tables that weren't\nprefixed with `ci_`. You can see the up-to-date list of tables and their respective\ndatabase in [`gitlab_schemas.yml`][gitlab_schemas_yml].\n\nThe next step was to see how viable it actually was.\n\n## Proving it can work\n\nThe [first proof-of-concept merge request][initial_poc_mr_for_ci_decomposition] was created\nin August 2021. The proof-of-concept process involved:\n\n- Separating the database and seeing what broke\n- Fixing blockers and marking todo's until we ended up with the application \"pretty much working\"\n\nWe never merged this proof of concept, but we progressively broke out changes into smaller merge requests\nor issues assigned to the appropriate teams to fix.\n\n![Screenshot of large proof-of-concept MR](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/poc-mr-scale.png)\n\n## Chasing a moving target\n\nWhen tackling a large-scale architecture change, you might find\nyourself chasing a moving target.\n\nTo split the database, we had to change the application. Our code depended on all\nthe tables being in a single database. 
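\n\nTo make that dependency concrete: GitLab itself is a Rails application, but the shape of the change is the same in any language. Here is a hedged Go sketch - invented schema and helper names, not GitLab code - of a query that joins a CI table to `projects`, rewritten from a single cross-table JOIN into two per-database queries joined in the application:\n\n```go\npackage split\n\nimport (\n    \"database/sql\"\n\n    \"github.com/lib/pq\" // for pq.Array; illustrative driver choice\n)\n\n// Before decomposition this was one query with a JOIN:\n//\n//   SELECT ci_builds.id FROM ci_builds\n//   JOIN projects ON projects.id = ci_builds.project_id\n//   WHERE projects.namespace_id = $1\n//\n// After decomposition the join moves into the application: one query\n// per database. Names and schema here are illustrative only.\nfunc buildIDsForNamespace(mainDB, ciDB *sql.DB, namespaceID int64) ([]int64, error) {\n    // Step 1: resolve the project IDs on the Main database.\n    rows, err := mainDB.Query(`SELECT id FROM projects WHERE namespace_id = $1`, namespaceID)\n    if err != nil {\n        return nil, err\n    }\n    defer rows.Close()\n    var projectIDs []int64\n    for rows.Next() {\n        var id int64\n        if err := rows.Scan(&id); err != nil {\n            return nil, err\n        }\n        projectIDs = append(projectIDs, id)\n    }\n\n    // Step 2: fetch the builds from the CI database for those projects.\n    ciRows, err := ciDB.Query(`SELECT id FROM ci_builds WHERE project_id = ANY($1)`, pq.Array(projectIDs))\n    if err != nil {\n        return nil, err\n    }\n    defer ciRows.Close()\n    var buildIDs []int64\n    for ciRows.Next() {\n        var id int64\n        if err := ciRows.Scan(&id); err != nil {\n            return nil, err\n        }\n        buildIDs = append(buildIDs, id)\n    }\n    return buildIDs, ciRows.Err()\n}\n```\n\nMultiplied across hundreds of call sites, and with new joins landing all the time, this is why we needed the tooling described below.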
These changes took almost a year.\n\nIn the meantime, the application was constantly evolving\nand growing, and with contributions from many engineers who weren't necessarily\nfamiliar with the CI decomposition project. This meant that we couldn't just\nstart fixing problems. We knew we would likely find new problems being\nintroduced at a faster rate than we could remove them.\n\nTo solve this problem, we took an approach that was inspired by\n[how we handle new RuboCop rules](https://docs.gitlab.com/ee/development/contributing/style_guides.html#resolving-rubocop-exceptions).\nThe idea is to implement static or dynamic analysis to detect these\nproblems. Then we use this information to generate an allowlist of exceptions.\nAfter we have this allowlist of exceptions, we prevent any new violations from being created\n(as any new violations will fail the pipeline).\n\nThe result was a clear list to work on and visibility into our progress.\n\nAs part of making the application compatible with CI decomposition, we needed to\nbuild the following:\n\n- [Multiple databases documentation][docs_multiple_databases] taught\n  developers how to write code that is compatible with multiple databases.\n- [Cross-join detection][mr_cross_join_detection] analyzed all SQL queries\n  and raised an error if the query spanned multiple databases.\n- [Cross-database transaction detection][mr_cross_db_transaction_detection]\n  analyzed all transactions and raised an error if queries were sent to two\n  different databases within the context of a single transaction.\n- [Query analyzer metrics][mr_query_analyzer_metrics] analyzed all SQL queries\n  and tracked the different databases that would be queried (based on table\n  names). These metrics, which were sampled at a rate of 1/10,000 queries, because they are\n  expensive to parse, were sent to Prometheus. We used this data to get a sense\n  of whether we were whittling down the list of cross-joins in production.\n  It also helped us catch code paths that weren't covered by tests but were\n  executed in production.\n- [A Rubocop rule for preventing the use of\n  `ActiveRecord::Base`][mr_rubocop_rule_ar_base] ensured that we always\n  used an explicit database connection for Main or CI.\n\n## Using Rails multiple database support\n\nWhen we began this project, there were many improvements being added to Rails to\nsupport multiple databases. We wanted to make use of as much of this Rails\nbuilt-in support as possible to minimize the amount of custom database\nconnection logic we had to maintain.\n\nOne considerable challenge with this was our existing\n[custom database load balancing logic](https://docs.gitlab.com/ee/administration/postgresql/database_load_balancing.html).\nThe development of this complex implementation spans a long period of time, and\nit was designed differently to how Rails connections were managed in the new\nmulti-database support.\n\nIn the end, were able to use parts of Rails multiple database support, but\n[we still hope to one day remove our custom logic and only use what is supported by Rails][epic_to_move_to_native_rails_multiple_dataabase_support].\n\n## Implementing loose foreign keys\n\nThere were still some foreign keys that existed between CI and non-CI tables.\nWe needed a way to remove these keys but still keep the functionality of cascading\ndeletes.\n\nIn the end, [we implemented a solution][lfk_mr]\nwe call [\"loose foreign keys\"][lfk_docs]. 
This solution provides similar functionality, with support for cascading `NULLIFY` or `DELETE` when a parent record is deleted in Postgres. It's implemented using Postgres triggers that fire on delete, so it guarantees all deletes (including bulk deletes) will be handled. The trigger writes to another \"queue\" table in Postgres, which is then picked up by a periodic Sidekiq worker that cleans up all the impacted child records.\n\nWhen implementing this solution, we also considered the option of using [`ActiveRecord` `before_destroy` callbacks](https://apidock.com/rails/ActiveRecord/Callbacks/before_destroy). However, they couldn't give us the same guarantees as Postgres foreign keys, because they can be intentionally or accidentally skipped.\n\nIn the end, the \"loose foreign keys\" solution also helped to solve another problem we had, where very large cascading deletes caused timeouts and user experience issues. Because it's asynchronous, we could easily control timing and batch sizes so that we never hit database timeouts and never overloaded the database with a single large delete.\n\n## Mirroring namespaces and projects\n\nOne of the most difficult dependencies between CI and Main features in GitLab is how CI runners are configured. Runners are assigned to projects and groups, which then dictates which jobs they will run. This meant there were many join queries from the `ci_runners` table to the `projects` and `namespaces` tables. We solved most of these issues by refactoring our Rails code and queries, but some proved very difficult to do efficiently.\n\nTo work around this issue, [we implemented][mr_namespace_project_mirroring] a mechanism to [mirror the relevant columns on `projects` and `namespaces` to the CI database][docs_ci_mirrored_tables].\n\nIt's not ideal to have to duplicate data that must be kept up to date like this, but while we expected this might be necessary in a few places, it turned out that we only ended up doing this for those two tables. All other joins could be handled without mirroring.\n\nAn important part of our mirroring architecture is periodic [consistency checking][mr_namespace_project_mirroring_consistency_check]. Every time this process runs, it takes a batch of the mirrored rows and compares them with the expected values. If there is a discrepancy, it schedules them to be fixed. After it's done with this batch, it updates a cursor in Redis to be used for the next batch.\n\n## Creating a phased rollout strategy\n\nA key part of ensuring our live migration went as smoothly as possible was making it as small as possible. This was quite difficult, as the migration from one database to two is a discrete change that seems hard to break up into smaller steps that can be rolled out individually.\n\nOne [early insight][initial_migration_plan_mr] was that we could actually reconfigure GitLab.com ahead of time so that the Rails application behaved as though it was talking to two separate databases long before we actually split the databases. Basically, the idea was that the Rails processes already had two separate database connections, but ultimately they were going to the same database. We could even break things out further, since our read-only connections are designed to read from slightly delayed replicas.
So we could already have read-only connections going to the\nnewly created CI read-only replicas before the migration.\n\n![Database architecture before final migration step](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/phase4.png)\n\nThese insights led to our [seven-phase migration process][phased_migration_epic].\nThis process meant that by the time we got to the final migration on production\n(Phase 7), we were already incredibly confident that the application would work\nwith separate databases and the actual change being shipped was just trivial\nreconfiguration of a single database host. This also meant that all phases\n(except for Phase 7) had a very trivial rollback process, introduced very\nlittle risk of incident and could be shipped before we were finished with every\ncode change necessary to make the application support two databases.\n\nThe seven phases were:\n\n1. Deploy a Patroni cluster\n2. Configure Patroni standby cluster\n3. Serve CI reads from CI standby cluster\n4. Separate write connections for CI and Main (still going to the same primary host)\n5. Do a staging dry run and finishing the migration plan\n6. Validate metrics and additional logging\n7. Promote the CI database and send writes to it\n\n## Using labels to distribute work and prioritize\n\nNow that we had a clear set of phases we could prioritize our work. All issues\nwere assigned [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels)\nbased on the specific phase they corresponded to. Since the work spanned many\nteams in development and infrastructure, those teams could use the\nlabel to easily tell which issues needed to be worked on first. Additionally,\nsince we kept an up-to-date timeline of when we expected to ship each phase,\neach team could use the phase label to determine a rough deadline of when that\nwork should get done to not delay the project. Overall there were at least 193\nissues over all phases. Phase 1 and 2 were mostly infrastructure tasks tracked\nin a different group and with different labels, but the other phases contained\nthe bulk of the development team requirements:\n\n1. [8 Phase 3 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase3)\n1. [78 Phase 4 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase4)\n1. [7 Phase 5 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase5)\n1. [64 Phase 6 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase6)\n1. 
[34 Phase 7 issues](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=ci-decomposition%3A%3Aphase7)\n\n## Continue reading\n\nYou can read more about the final migration process and results of the migration in [Part 2](/blog/path-to-decomposing-gitlab-database-part2/).\n\n[initial_poc_mr_for_ci_decomposition]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67486\n[initial_migration_plan_mr]: https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/84588\n[lfk_mr]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69165\n[lfk_docs]: https://docs.gitlab.com/ee/development/database/loose_foreign_keys.html\n[epic_to_move_to_native_rails_multiple_dataabase_support]: https://gitlab.com/gitlab-org/gitlab/-/issues/296870\n[phased_migration_epic]: https://gitlab.com/groups/gitlab-org/-/epics/6160\n[sharding_by_top_level_namespace_poc_epic]: https://gitlab.com/groups/gitlab-org/-/epics/5838\n[analysis_of_decomposition_tables]: https://gitlab.com/groups/gitlab-org/-/epics/5883#summary-of-impact\n[gitlab_schemas_yml]: https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/database/gitlab_schemas.yml\n[docs_ci_mirrored_tables]: https://docs.gitlab.com/ee/development/database/ci_mirrored_tables.html\n[mr_cross_join_detection]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68620\n[mr_cross_db_transaction_detection]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67213\n[mr_query_analyzer_metrics]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/73839\n[mr_rubocop_rule_ar_base]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/64937\n[mr_namespace_project_mirroring]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75517\n[mr_namespace_project_mirroring_consistency_check]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/81836\n[docs_multiple_databases]: https://docs.gitlab.com/ee/development/database/multiple_databases.html\n",[915,959],{"slug":2488,"featured":6,"template":678},"path-to-decomposing-gitlab-database-part1","content:en-us:blog:path-to-decomposing-gitlab-database-part1.yml","Path To Decomposing Gitlab Database Part1","en-us/blog/path-to-decomposing-gitlab-database-part1.yml","en-us/blog/path-to-decomposing-gitlab-database-part1",{"_path":2494,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2495,"content":2500,"config":2505,"_id":2507,"_type":16,"title":2508,"_source":17,"_file":2509,"_stem":2510,"_extension":20},"/en-us/blog/path-to-decomposing-gitlab-database-part2",{"title":2496,"description":2497,"ogTitle":2496,"ogDescription":2497,"noIndex":6,"ogImage":2478,"ogUrl":2498,"ogSiteName":692,"ogType":693,"canonicalUrls":2498,"schema":2499},"GitLab database decomposition: Final migration and results","This is the second in our three-part technical summary of the yearlong project to decompose GitLab's Postgres database.","https://about.gitlab.com/blog/path-to-decomposing-gitlab-database-part2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Decomposing the GitLab backend database, Part 2: Final migration and results\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dylan Griffith\"}],\n        \"datePublished\": \"2022-08-04\",\n      }",{"title":2501,"description":2497,"authors":2502,"heroImage":2478,"date":2484,"body":2503,"category":14,"tags":2504},"Decomposing the GitLab backend database, Part 2: Final migration and results",[2483],"\n\n_This blog post is part 2 in a three-part series about 
decomposing the GitLab backend database. It focuses on the final migration\nprocess and highlights the results we achieved after the migration. If you want to read about the design and planning phase, check out [part 1](/blog/path-to-decomposing-gitlab-database-part1/)._\n\n## Deciding between zero downtime and full downtime\n\nEarly on in the project we thought it would be necessary for the migration to\nbe \"zero downtime\" or \"near-zero downtime\". We [came up with this plan][initial_migration_plan_mr]\nearly on which involved (in summary):\n1. The entire database would be replicated (including non-CI tables) using\n   Patroni cascading/standby replication to a dedicated CI Patroni cluster.\n   Replication only lags by at most a few seconds.\n2. Read traffic for CI tables could be split ahead of time to read from the CI\n   replicas.\n3. Write traffic would be split ahead of the migration into CI and Main by\n   sending these through separate dedicated PGBouncer proxies. Initially CI\n   writes still go to the Main database since the CI cluster is just a standby.\n   These proxies would be the thing we reconfigured during the live migration\n   to point at the CI cluster.\n4. At the time of migration we would pause writes to the CI tables by pausing\n   the CI PGBouncer.\n5. After pausing writes to the CI database we'd capture the current LSN\n   position in Postgres of the Main primary database (now expect no more writes\n   to CI tables to be possible).\n6. After that we wait until the CI database replication catches up to that\n   point.\n7. Then we promote the CI database to accept writes (remove the cascading\n   replication).\n8. Then we reconfigure writes to point to the CI database by updating the write\n   host in the CI PGBouncer.\n9. The migration is done.\n\n![Database architecture actual final migration step](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/phase4to7.png)\n\nThis approach (assuming that the CI replicas were only delayed by a few\nseconds) would mean that, at most, there would be a few seconds where CI writes\nmight result in errors and 500s for users. Many failures would likely already\nbe retried since much of CI write traffic goes via asynchronous (Sidekiq)\nprocesses that automatically retry.\n\nIn the end we didn't use this approach because:\n\n1. This approach didn't have an easy-to-implement rollback strategy. Data that\n   was written to CI tables during the migration would be lost if we rolled\n   back to just the Main database.\n2. The period of a few seconds where we expect to see some errors might make it\n   difficult for us to quickly determine the success or failure of the\n   migration.\n3. There was no hard business requirement to avoid downtime.\n\nThe [migration approach we ended up using][phase7_summary_epic] took two\nhours of downtime. We stopped all GitLab services that could read or write\nfrom the database. We also blocked user-level traffic at the CDN (Cloudflare) to allow us\nto do some automated and manual testing before opening traffic back up to\nusers. This allowed us to prepare a [slightly more straightforward rollback procedure][rollback_issue],\nwhich was:\n\n1. Reconfigure all read-only CI traffic back to the Main replicas\n2. Reconfigure all read-write CI traffic (via PGBouncer) back to the Main\n   primary database\n3. 
Increment the Postgres sequences for all CI tables to avoid overlapping with data we created in our testing\n\nUltimately, having a simple rollback mechanism proved very useful in doing many practice runs on staging.\n\n## Rehearsing the migration process\n\nBefore executing the final migration on GitLab.com, we executed seven rehearsals with rollback, and one final migration, on our staging environment. In these practice runs, we discovered many small issues that would likely have caused problems in the production environment.\n\nThese rehearsals also gave all the participants an opportunity to perfect their steps in the process to minimize delays in our production rollout. This practice ultimately allowed us to be quite confident in our timeline of at most two hours of downtime.\n\nIn the end, we finished the migration in 93 minutes, with a few small delays caused by surprises we did not see in staging.\n\nThe rehearsal process was very time-consuming and a vast effort to execute in the context of GitLab, where we all [work asynchronously](https://about.gitlab.com/company/culture/all-remote/asynchronous/) and across different timezones. However, it proved to be essential to the success of this project.\n\n## Preparing for production migration\n\nOne week before the final migration on production, we prepared a production readiness review issue for final approval from executives. This was a good opportunity to highlight all the preparation and validation we'd done to give us confidence in the plan. This also encouraged us to do extra validation where we might expect to see questions or concerns about the plan.\n\nSome highlights from this review included:\n\n1. The number of practice runs we'd done, including details about the problems we'd seen and resolved in staging\n2. Metrics we'd observed that proved all the queries were already using the right database connections\n3. Details about how long we'd been running without issues in local development, with all GitLab developers running with two databases by default\n4. Details about the rollback strategy we would use if necessary, and how we tested this rollback strategy in staging as well as with some production validation\n\n## Tracking the results\n\nAfter we completed the rollout, we tracked [performance improvements across some metrics we expected to improve][performance_improvements_tracking_issue].\n\nThe data showed:\n\n- We decreased the CPU utilization of our primary database server, giving us much more headroom.\n\n  ![CPU peaks before and after decomposition shows smaller peaks after](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/cpu-post-decomposition-improvement.png)\n\n- We can free around 9.2TiB out of 22TiB from our Main database by truncating the CI tables.\n- We can free around 12.5TiB out of 22TiB from our CI database by truncating the Main tables.\n- We significantly reduced the rate of dead tuples on our Main database.\n- We significantly reduced vacuuming saturation. Before decomposition, the Main database maximum vacuuming saturation was up to 100%, with the average closer to 80%.
After\n  decomposition, vacuuming saturation has stabilized at around 15% for\n  both databases.\n\n  ![Vacuum saturation before and after decomposition shows a decrease after decomposition](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/vacuum-saturation-post-decomposition.png)\n\n- We reduced the average query duration for our Sidekiq PGBouncer query\n  pool by at least a factor of 5 once we scaled up connection limits due to our\n  increased headroom. Previously we needed to throttle connections for\n  asynchronous workloads to avoid overloading the primary database.\n\n  ![Average active query duration by workload shows a decrease after scaling connections after decomposition](https://about.gitlab.com/images/blogimages/2022-07-15-path-to-decomposing-gitlab-database/pgbouncer-active-query-duration-by-workload.png)\n\n## Continue reading\n\nYou can read more about some interesting technical challenges and surprises we\nhad to deal with along the way in\n[part 3](/blog/path-to-decomposing-gitlab-database-part3/).\n\n[initial_migration_plan_mr]: https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/84588\n[performance_improvements_tracking_issue]: https://gitlab.com/gl-retrospectives/sharding-group/-/issues/18\n[phase7_summary_epic]: https://gitlab.com/groups/gitlab-org/-/epics/7791\n[rollback_issue]: https://gitlab.com/gitlab-org/gitlab/-/issues/361759\n",[915,959],{"slug":2506,"featured":6,"template":678},"path-to-decomposing-gitlab-database-part2","content:en-us:blog:path-to-decomposing-gitlab-database-part2.yml","Path To Decomposing Gitlab Database Part2","en-us/blog/path-to-decomposing-gitlab-database-part2.yml","en-us/blog/path-to-decomposing-gitlab-database-part2",{"_path":2512,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2513,"content":2518,"config":2522,"_id":2524,"_type":16,"title":2525,"_source":17,"_file":2526,"_stem":2527,"_extension":20},"/en-us/blog/path-to-decomposing-gitlab-database-part3",{"title":2514,"description":2515,"ogTitle":2514,"ogDescription":2515,"noIndex":6,"ogImage":2478,"ogUrl":2516,"ogSiteName":692,"ogType":693,"canonicalUrls":2516,"schema":2517},"Decomposing the GitLab backend database, Part 3: Challenges and surprises","This is the final installment in our three-part series about our yearlong project to decompose GitLab's Postgres database.","https://about.gitlab.com/blog/path-to-decomposing-gitlab-database-part3","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Decomposing the GitLab backend database, Part 3: Challenges and surprises\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dylan Griffith\"}],\n        \"datePublished\": \"2022-08-04\",\n      }",{"title":2514,"description":2515,"authors":2519,"heroImage":2478,"date":2484,"body":2520,"category":14,"tags":2521},[2483],"\n_This blog post is part 3 in a three-part series. It focuses on some interesting\nlow-level challenges we faced along the way, as well as some surprises we found during\nthe migration._\n\n- To read about the design and planning phase, check out [part 1](/blog/path-to-decomposing-gitlab-database-part1/).\n- To read about how we executed the actual migration and our results, check out [part 2](/blog/path-to-decomposing-gitlab-database-part2/).\n\n## The challenge with taking GitLab.com offline\n\nOne key part of our migration process was to take all systems offline that\ncould potentially talk to the database. 
This may seem as simple as \"shutting\ndown the servers\", but given the scale and complexity of GitLab.com's\ninfrastructure it proved to be quite complex. Here is just a subset of\nthe different things we had to shut down:\n\n1. Kubernetes pods corresponding to web, API, and Sidekiq services\n2. Cron jobs across various VMs\n\n## Surprises along the way\n\nEven though we had rehearsed the migration many times in staging, there were\nstill some things that caught us off-guard in production. Luckily, we had\nallocated sufficient buffer time during the migration to resolve all of these\nduring the call:\n\n1. Autovacuum on our largest CI tables takes a long time and can run at any\n   time. This delayed our migration as we needed to gain table locks for our\n   [write block\n   triggers](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/83211).\n   Adding these triggers requires a `ShareRowExclusiveLock` which cannot be\n   acquired while autovacuum is running for that table. We disabled\n   some manual vacuum processes we were aware of ahead of the call, but\n   autovacuum can happen at any time and our `ci_builds` table just happened to\n   be undergoing autovacuum at the moment we were trying to block writes to it. To\n   work around this we needed to temporarily disable autovacuum for the\n   relevant tables, find the `pid` of the autovacuum process, and\n   terminate it, which allowed our triggers to be successfully added (see the\n   sketch after this list).\n2. Sometimes a long-running SSH session by an SRE or developer can leave open a\n   surprising database connection that needs to be tracked down and closed.\n3. Cron jobs can run on various hosts and start Rails processes or database\n   connections at any time. We had many examples that were created for\n   different database maintenance purposes over the years, and we missed at\n   least one in our practice runs. They weren't as easy to detect on staging as\n   they may not all be configured on staging, or they run a lot faster on\n   staging. Also, our staging runs all happened on weekdays, but our\n   production migration happened on a weekend, when we were\n   deliberately running some database maintenance workloads during low\n   utilization hours.\n4. Our Sentry client-side error tracking caused us to overload our Sentry\n   server due to the many users leaving GitLab browser tabs open. As\n   the browser tabs periodically made asynchronous requests to GitLab and got\n   errors (since GitLab.com was down), they all sent these errors to Sentry,\n   overloading our Sentry error server to the point that we couldn't load it\n   to check for errors. This was quickly diagnosed based on the URL all the\n   requests were sent to, but it did delay our migration, as checking for new\n   errors was key to determining success or failure of the migration.
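\n\nFor reference, the autovacuum workaround in the first item boils down to something like the following (an illustrative sketch, not the exact commands run during the migration; the `pid` value is hypothetical):\n\n```sql\n-- Temporarily stop autovacuum from running against the table we need to lock.\nALTER TABLE ci_builds SET (autovacuum_enabled = false);\n\n-- Find the pid of any autovacuum worker currently processing the table.\nSELECT pid, query FROM pg_stat_activity WHERE query LIKE 'autovacuum:%ci_builds%';\n\n-- Terminate that worker so the ShareRowExclusiveLock can be acquired.\nSELECT pg_terminate_backend(12345); -- hypothetical pid from the query above\n```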
\n\n## Cascading replication doubles latency (triples in our case)\n\nA key initial step in our phased rollout was to move all read-only CI traffic\nto dedicated CI replicas. These were cascading replicas from the main Patroni\ncluster. Furthermore, we made the decision to create the standby cluster leader\nas a replica of another replica in the Main Patroni cluster. Ultimately this\nmeant the replication process for our CI replicas was\n`Main Primary -> Main Replica -> CI Standby Leader -> CI Replica`.\n\nThis change meant that our CI replicas had roughly three times as much latency\ncompared with our Main replicas, which previously served CI read-only traffic.\nSince our read-only load balancing logic is based on users sticking to the primary\nuntil a replica catches up with the last write that they performed, users\nmight end up sticking to the primary longer than they previously would have.\nThis may have served to increase our load on the primary database after rolling\nout Phase 3.\n\nWe never measured this impact, but in hindsight it is something we\nshould have factored in and benchmarked with our gradual rollout of Phase 3.\nAdditionally, we should have considered mitigating this issue by having the `CI\nStandby Leader` replicate straight from the `Main Primary`, or by adding the `CI\nStandby Leader` to the pool of replicas from which we could serve CI read-only\ntraffic.\n\n## Re-balancing PGBouncer connections incrementally without saturating anything\n\n[Phase 4 of our rollout][phase4_change_request] turned out to be one of the\ntrickiest parts of the migration. Since we wanted all phases (where possible)\nto be rolled out incrementally we needed some way to [solve for\nincrementally re-balancing connection pool limits][phase4_gradual_rollout_issue]\nfrom `GitLab -> PGBouncer -> Postgres` without exceeding the total connection\nlimit of Postgres or opening so many connections to Postgres that we might\nsaturate CPU. This was difficult because all the connection limits were very\nwell tuned, and we were close to saturation across all these limits.\n\nThe gradual rollout of traffic for Phase 4 looked like:\n\n```mermaid\ngraph LR;\n    PostgresMain[(PostgresMain - Limit K max_connections)]\n    GitLabRails-->|100-X % of CI queries|PGBouncerMain\n    GitLabRails-->|X% of CI queries|PGBouncerCi\n    PGBouncerMain-->|Limit N pool_size|PostgresMain\n    PGBouncerCi-->|Limit M pool_size|PostgresMain\n```\n\nWe wanted to gradually increase X from 0 to 100. But this presented a problem, because\nthe number of connections to the `PostgresMain` DB would change\nalong with it.\n\nAssume Postgres has some initial limit of `K` connections, and\nassume this limit is deliberately just high enough to handle the current\nconnections from `PGBouncerMain` without overloading the CPU. We need to carefully\ntune the `N` and `M` `pool_size` values across the separate PGBouncer processes to\navoid exceeding the limit `K`, and we also need to avoid saturating the\nPostgres server CPU with too much traffic. At the same time, we need to ensure\nthere are enough connections to handle the traffic to both PGBouncer pools.\nFor example, if `K` were 100 and `PGBouncerMain` started with `N = 100`, every\nstep that moved CI traffic over would have to shrink `N` and grow `M` together\nso that `N + M` never exceeded 100.\n\nWe addressed this issue by taking very small steps during low\nutilization hours (where CPU and connection pools weren't near saturation) and\ndoing very detailed analysis after each step. We would then wait a day or so and use the number of\nconnections actually consumed by the smaller step to decide how\nmany connections to move in the following steps. 
We also used what data we had\nearly on from table-based metrics to get an insight into how many connections\nwe thought we'd need to move to the CI PGBouncer pool.\n\nIn the end, we did need to make small adjustments to our estimates along the way\nas we saw saturation occur, but there were never any major user-facing saturation\nincidents, as the steps were small enough.\n\n## Final thoughts\n\nWe're very happy with the results of this project overall.\n\nA key unknown in this project, which was hard to predict, was how much the complexity of\nan additional database might impact developer productivity. Developers can't do\ncertain types of joins, and there is more information to be aware of.\nHowever, many months have now passed, and it seems clear that the complexity is mostly abstracted by Rails models. With a consistently large number of developers contributing, we have seen\nlittle-to-no impact on productivity.\n\nCombining this success with the huge scalability headroom we've gained, we believe this was a great decision for GitLab.\n\n## More reading\n\nThis blog series contains many links to our early design, planning, and\nimplementation of various parts of this project. GitLab's\n[transparency value](https://handbook.gitlab.com/handbook/values/#transparency)\nmeans you can read all the details and get a sense of what it's like to work on\nprojects like this at GitLab. If you'd like to know more, or something was\nunclear, please leave a comment so we can make sure we share all our learnings.\n\n[phase4_change_request]: https://gitlab.com/gitlab-com/gl-infra/production/-/issues/6440\n[phase4_gradual_rollout_issue]: https://gitlab.com/gitlab-org/gitlab/-/issues/347203\n",[915,959],{"slug":2523,"featured":6,"template":678},"path-to-decomposing-gitlab-database-part3","content:en-us:blog:path-to-decomposing-gitlab-database-part3.yml","Path To Decomposing Gitlab Database Part3","en-us/blog/path-to-decomposing-gitlab-database-part3.yml","en-us/blog/path-to-decomposing-gitlab-database-part3",{"_path":2529,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2530,"content":2536,"config":2542,"_id":2544,"_type":16,"title":2545,"_source":17,"_file":2546,"_stem":2547,"_extension":20},"/en-us/blog/how-to-provision-reviewops",{"title":2531,"description":2532,"ogTitle":2531,"ogDescription":2532,"noIndex":6,"ogImage":2533,"ogUrl":2534,"ogSiteName":692,"ogType":693,"canonicalUrls":2534,"schema":2535},"Deploying dynamic review environments with MRs and Argo CD","Here's how to use the Argo CD ApplicationSet to provision a ‘ReviewOps’ environment based on merge request changes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681920/Blog/Hero%20Images/kubernetes.png","https://about.gitlab.com/blog/how-to-provision-reviewops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision dynamic review environments using merge requests and Argo CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joe Randazzo\"},{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2022-08-02\",\n      }",{"title":2537,"description":2532,"authors":2538,"heroImage":2533,"date":2539,"body":2540,"category":14,"tags":2541},"How to provision dynamic review environments using merge requests and Argo CD",[2072,1324],"2022-08-02","\nWe recently learned of a new contribution to the ApplicationSet in the Argo CD project, specifically the [Pull Request generator for 
GitLab](https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/applicationset/Generators-Pull-Request.md#gitlab) and decided to take it for a spin. What makes this interesting is that dynamic [review environments](https://docs.gitlab.com/ee/ci/review_apps/index.html) can now be provisioned intuitively from the merge request (MR) using a [GitOps](/topics/gitops/) workflow. The benefit is that code reviewers or designers can quickly review any app changes deployed to your Kubernetes cluster, all from within the merge request.\n\nIn traditional testing workflows, you may have pushed your changes into a development environment, waited for the QA and UX teams to pull those changes into their environment for further review, and then received feedback based on your small change. Along the way, time was wasted on environment coordination between teams and on adding the new changes' bugs to the backlog. \n\nWith the combination of a merge request and review environments, you can quickly spin up a test environment based on the changes of your feature branch. This means the QA or UX team can suggest improvements or changes during the code review process without wasting cycles.\n\nThe introduction of the ApplicationSet has given Argo CD workflows greater flexibility, such as:\n\n- Allowing unprivileged cluster users to deploy applications (without namespace access)\n- Deploying applications to multiple clusters at once\n- Deploying many applications from a single monorepo\n- **And triggering review environments based on a pull request**\n\n### Let's review the ApplicationSet and the GitLab Pull Request Generator\n\nThe [Pull Request Generator](https://argo-cd.readthedocs.io/en/latest/operator-manual/applicationset/Generators-Pull-Request) uses the GitLab API to automatically discover new merge requests within a repository. Depending on the filter match of the MR, a review environment will then be generated.\n\n```yaml\napiVersion: argoproj.io/v1alpha1\nkind: ApplicationSet\nmetadata:\n  name: review-the-application\n  namespace: argocd\nspec:\n  generators:\n  - pullRequest:\n      gitlab:\n        project: \u003Cproject-id>\n        api: https://gitlab.com/\n        tokenRef:\n          secretName: \u003Cgitlab-token>\n          key: token\n        pullRequestState: opened\n      requeueAfterSeconds: 60\n  template:\n    metadata:\n      name: 'review-the-application-{{number}}'\n    spec:\n      source:\n        repoURL: \u003Crepository-with-manifest-files>\n        path: chart/\n        targetRevision: 'HEAD'\n        helm:\n          parameters:\n          - name: \"image.repository\"\n            value: \"registry.gitlab.com/\u003Cgroup-and-project-path>/{{branch}}\"\n          - name: \"image.tag\"\n            value: \"{{head_sha}}\"\n          - name: \"service.url\"\n            value: \"the-application-{{number}}.\u003Cip>.nip.io\"\n      project: default\n      destination:\n        server: https://kubernetes.default.svc\n        namespace: dynamic-environments-with-argo-cd\n```\n#### Fields\n\n* `project`: The GitLab Project ID\n* `api`: URL of the GitLab instance\n* `tokenRef`: The secret used to monitor merge request changes\n* `labels`: Provision review environments based on a GitLab label\n* `pullRequestState`: Provision review environments based on [MR states](https://docs.gitlab.com/ee/api/merge_requests.html)\n\nFilter options include GitLab labels, merge request state (open, closed, merged), and branch match. Templating options include merge request ID, branch name, branch slug, head sha, and head short sha.
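\n\nFor instance, if you only wanted review environments for merge requests carrying a particular label, a hypothetical generator block using the `labels` field above might look like this (the label name is illustrative):\n\n```yaml\n  generators:\n  - pullRequest:\n      gitlab:\n        project: \u003Cproject-id>\n        api: https://gitlab.com/\n        tokenRef:\n          secretName: \u003Cgitlab-token>\n          key: token\n        # Only MRs labeled 'review-env' produce a review environment.\n        labels:\n          - review-env\n      requeueAfterSeconds: 60\n```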
\n\nSee the latest [ApplicationSet documentation](https://argo-cd.readthedocs.io/en/latest/operator-manual/applicationset/Generators-Pull-Request/#gitlab) for additional details.\n\nFor this blog post, we explore using the Argo CD ApplicationSet to provision a “ReviewOps” environment based on merge request changes.\n\n### Prerequisites\n\nThe following tools are required for running this tutorial. Please install and/or configure them before getting started.\n\n- **Tools**\n  - GitLab v15.0+ \n  - Kubernetes cluster v1.21+\n  - Argo CD 2.5.0+\n- **CLI**\n  - kubectl v1.21+\n\n### Explore the Source Code\n\nFirst, let’s explore the [source code](https://gitlab.com/madou-stories/dynamic-environments-with-argo-cd) for the tutorial.\n\nThis GitLab group is composed of the following two projects:\n\n- `The Application`: contains the source code of a containerized application and its CI/CD pipeline\n- `The Application Configuration`: contains the application configuration (Kubernetes Manifests) managed by Helm\n\n![git-repository](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/git-repository.png)\n\n### Setting up GitLab\n\n1. Create your GitLab Group and fork the [The Application](https://gitlab.com/madou-stories/dynamic-environments-with-argo-cd/the-application) and [The Application Configuration](https://gitlab.com/madou-stories/dynamic-environments-with-argo-cd/the-application-configuration) projects into it.\n\n2. In `The Application Configuration` project, edit `manifests/applicationset.yml` as follows:\n\n  * `.spec.generators.pullRequest.gitlab.project`: The Project ID of `The Application`\n  * `.spec.template.spec.source.repoURL`: Git URL of `The Application Configuration`\n  * `.spec.template.spec.source.helm.parameters.\"image.repository\"`: Point to the image repository, for example `registry.gitlab.com/\u003CYour_GitLab_Group>/the-application/{{branch}}`\n\n  Note: keep the {{branch}} string as is and replace \u003CYour_GitLab_Group> with the name of the group you created in step 1.\n\n  * `.spec.template.spec.source.helm.parameters.\"service.url\"`: Templated with `the-application-{{number}}.\u003CYour_Kube_Ingress_Base_Domain>`\n\n  Note: keep the {{number}} string as is and replace \u003CYour_Kube_Ingress_Base_Domain> with the base domain of your Kubernetes Cluster.\n\n3. Define the following CI/CD variables at the group level:\n\n   - `ARGOCD_SERVER_URL`, the Argo CD server address\n   - `ARGOCD_USERNAME`, the username of your Argo CD account\n   - `ARGOCD_PASSWORD`, the password of your Argo CD account\n   - `KUBE_INGRESS_BASE_DOMAIN`, the base domain of your Kubernetes Cluster\n\n   ![cicd-variables](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/cicd-variables.png)\n\n4. Generate a Group access token to grant `read_api` and `read_registry` access to this group and its sub-projects.\n\n   ![group-access-token](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/group-access-token.png)\n\n   Save the group access token somewhere safe. We will use it later.\n\n### Setting up Kubernetes\n\n1. Create a namespace called `dynamic-environments-with-argo-cd`.\n   ```shell\n   kubectl create namespace dynamic-environments-with-argo-cd\n   ```\n2. 
Create a Kubernetes secret called `gitlab-token-dewac` to allow Argo CD to use the GitLab API.\n   ```shell\n   kubectl create secret generic gitlab-token-dewac -n argocd --from-literal=token=\u003CYour_Access_Token>\n   ```\n3. Create the same secret, `gitlab-token-dewac`, in the application namespace to allow Kubernetes to pull images from the GitLab Container Registry.\n   ```shell\n   kubectl create secret generic gitlab-token-dewac -n dynamic-environments-with-argo-cd --from-literal=token=\u003CYour_Access_Token>\n   ```\n\n### Setting up Argo CD\n\n1. Create the Argo CD ApplicationSet to generate an Argo CD Application associated with a merge request.\n   ```shell\n   kubectl apply -f https://gitlab.com/\u003CYour_GitLab_Group>/the-application-configuration/-/raw/main/manifests/applicationset.yaml\n   ```\n\n### Update the source code\n\n1. In `The Application` project, create a GitLab issue, then an associated branch and merge request. \n2. In Argo CD, a new application called `review-the-application` is provisioned based on the new merge request event.\n\n   ![review-the-application-argocd](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/review-the-application-argocd.png)\n\n3. In `The Application` project, edit `index.pug` and replace `p Welcome to #{title}` with `p Bienvenue à #{title}`.\n4. Commit to your branch, which will trigger a pipeline run.\n5. In CI/CD > Pipelines, you will find the following pipeline running on your merge request:\n\n   ![feature-branch-pipeline](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/feature-branch-pipeline.png)\n\n   where:\n\n   - `docker-build`: builds the container image\n   - `reviewops`: configures and deploys the container into the review environment using Argo CD\n   - `stop-reviewops`: deletes the review environment\n\n6. Once completed, the `review-the-application` application in Argo CD is now synced.\n\n   ![review-the-application-synced](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/review-the-application-synced.png)\n\n7. From the merge request, click on the `View app` button to access your application.\n\n   ![view-app-button](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/view-app-button.png)\n\n   The outcome should be as follows:\n\n   ![express-app](https://about.gitlab.com/images/blogimages/2022-08-01-how-to-provision-reviewops/express-app.png)\n\n8. You have successfully provisioned a dynamic review environment based on your merge request! Once the merge request is closed, the environment will be automatically cleaned up.
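\n\nFor reference, a `reviewops` job like the one in the pipeline above typically logs in to Argo CD and syncs the application that the ApplicationSet generated for this MR. The following is a simplified, hypothetical sketch (the real job lives in `The Application` project's `.gitlab-ci.yml`; the variables are the group-level ones defined earlier):\n\n```yaml\nreviewops:\n  stage: deploy\n  script:\n    # Authenticate against the Argo CD server using the group-level CI/CD variables.\n    - argocd login \"$ARGOCD_SERVER_URL\" --username \"$ARGOCD_USERNAME\" --password \"$ARGOCD_PASSWORD\" --grpc-web\n    # Sync the application generated for this merge request ({{number}} is the MR IID).\n    - argocd app sync \"review-the-application-$CI_MERGE_REQUEST_IID\"\n  rules:\n    - if: $CI_MERGE_REQUEST_IID\n```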
\n\n## To sum up\n\nHopefully this tutorial has been helpful and has inspired your GitLab + Argo CD workflows with review environments.\n\nWe'd love to hear in the comments how this is working for you, as well as your ideas on how we can make GitLab a better place for GitOps workflows.\n",[1084,535,894],{"slug":2543,"featured":6,"template":678},"how-to-provision-reviewops","content:en-us:blog:how-to-provision-reviewops.yml","How To Provision Reviewops","en-us/blog/how-to-provision-reviewops.yml","en-us/blog/how-to-provision-reviewops",{"_path":2549,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2550,"content":2556,"config":2562,"_id":2564,"_type":16,"title":2565,"_source":17,"_file":2566,"_stem":2567,"_extension":20},"/en-us/blog/how-to-access-gitlab-on-a-private-network-with-tailscale",{"title":2551,"description":2552,"ogTitle":2551,"ogDescription":2552,"noIndex":6,"ogImage":2553,"ogUrl":2554,"ogSiteName":692,"ogType":693,"canonicalUrls":2554,"schema":2555},"How to access GitLab on a private network with Tailscale","If issues around a private network were preventing a permanent GitLab installation, Brendan O'Leary has the solution with Tailscale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679348/Blog/Hero%20Images/locks.jpg","https://about.gitlab.com/blog/how-to-access-gitlab-on-a-private-network-with-tailscale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to access GitLab on a private network with Tailscale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-07-21\",\n      }",{"title":2551,"description":2552,"authors":2557,"heroImage":2553,"date":2559,"body":2560,"category":14,"tags":2561},[2558],"Brendan O'Leary","2022-07-21","\nGitLab provides an easy-to-install package for most Linux distributions and even for devices like the [Raspberry Pi](https://docs.gitlab.com/omnibus/settings/rpi.html). However, if you want to install GitLab in a home lab or similar private network, you would then be faced with a new issue: how do you access the instance from outside that private network?\n\nTraditionally, you would set up your router to forward traffic from your public IP address to the server inside your network. However, this comes with several drawbacks:\n\n- Opening a port on your home or private network comes with a substantial amount of risk.\n- It can be hard or impossible for folks to do depending on their internet service provider and what routing equipment they use.\n- It can be especially tough if your ISP doesn't provide you with a statically assigned IP address, which means your address can change from time to time, and you'll need to either update DNS manually or through some third-party [dynamic DNS](https://www.cloudflare.com/learning/dns/glossary/dynamic-dns/) service.\n\nFor me, all of these challenges have meant that I've only ever really run GitLab \"for fun\" on my local network. Given the challenges above, running a permanent installation wasn't an option. That is, until [Tailscale](https://tailscale.com) entered my life.\n\n## Tailscale\n\nTailscale isn't necessarily the \"newest\" technology. 
In fact, it is based on the [WireGuard protocol](https://www.wireguard.com/), which has existed in one form or another since 2015 and has seen native kernel support added to various Linux distributions, and eventually the mainline kernel itself, over the past several years. WireGuard VPN technology makes considerable improvements in the usability and setup of virtual private networks over earlier protocols like IPsec. Even so, the \"problem\" with WireGuard, at least for me, was always that it was still too complex to set up and maintain. Much like configuring my ISP's router for port forwarding, it wasn't _impossible_, but it just wasn't practical.\n\nEnter Tailscale. Tailscale provides a simple piece of client software, available for Linux, Mac, and Windows (and iOS and Android!), which implements the WireGuard protocol and allows you to control your VPN network from a handy web interface. Not only that, it's [free to use](https://tailscale.com/pricing/) for individuals and small networks. When I started using Tailscale, it was to make sure I could connect back to my home network and troubleshoot it while traveling for work. As the only system administrator in my house, this was fantastic.\n\nHowever, Tailscale also makes it easy to access services inside various networks by setting up a mesh VPN between them, all with IP addresses in the 100.x.y.z range. That means for any web service or other service on my network, I can access it with a statically assigned IP address from any other device connected to Tailscale, and create a DNS record to have a domain point to the IP address. At last, I could run GitLab (and other open source tools) at home and safely connect to them from outside my house with as little hassle as possible. So how did I get it to work?\n\n## Tailscale and GitLab together\n\nAssuming you already have a GitLab [installation](/install/) up and running on your network, getting it working through Tailscale involves a few steps:\n\n- Installing Tailscale\n- Setting up DNS for the private address\n- Configuring HTTPS encryption\n\n### Installing Tailscale\n\nPackages are [available](https://tailscale.com/kb/1031/install-linux/) for many Linux distributions. To install Tailscale, you can select your [specific distribution](https://tailscale.com/kb/1031/install-linux/) for detailed instructions. There are also [static binaries](https://tailscale.com/kb/1053/install-static/) if you can't find your particular distribution - they are available for x86 and ARM CPUs for both 32- and 64-bit variants.\n\nOnce Tailscale is installed, getting it running is as simple as running the following command on the CLI:\n\n```bash\nsudo tailscale up\n```\n\nThe setup dialogue will walk you through the authentication process and get Tailscale running. After that process, you can see your new IP address for this node on your network with the CLI command `tailscale ip -4`. You'll need that IP address for the next steps.\n\nBy default, Tailscale will set an expiration date for the token it issues to your device during the authentication process. This is desirable for typical devices that may be transient or portable. However, suppose your device is secured inside your home or another secure place AND is a server you're not accessing all the time. 
In that case, you can optionally [disable key expiry](https://tailscale.com/kb/1028/key-expiry/) for that particular device.\n\n### Setting up DNS\n\nYou should now be able to access your device from any other Tailscale-connected device via the IP address from the last step. However, my goal was to make it easy for me to connect to GitLab, reference it by a URL, and encrypt the traffic end-to-end with TLS. So the next step was to set up DNS.\n\nEven though the 100.x.y.z address is a private IP address, you can still create a public DNS record and have the hostname point to it. That won't mean the whole world can access your server - it just means once you're connected to your Tailscale network, you can resolve that hostname to the IP address and access the web server. I set up an A record for `gitpi.boleary.dev` to resolve to an IPv4 address:\n\n```\n;; QUESTION SECTION:\n;gitpi.boleary.dev.\t\tIN\tA\n\n;; ANSWER SECTION:\ngitpi.boleary.dev.\t300\tIN\tA\t100.64.205.40\n```\n\nAn important note here is that I use Cloudflare as my DNS provider - and I usually love Cloudflare's proxying service to make my \"real\" IP addresses private. In this case, you have to disable that proxying to make sure that you can resolve the correct address - Cloudflare can't proxy traffic into your Tailscale network.\n\n### Configuring HTTPS\n\nLastly, configuring HTTPS for your GitLab instance will ensure that all traffic is encrypted end-to-end. While Tailscale encrypts the traffic over the network, this will ensure there are no gaps between your device and your GitLab server.\n\nTo accomplish this, we'll use [`certbot`](https://certbot.eff.org/) from the EFF, which lets us create and manage [Let's Encrypt](https://letsencrypt.org/) certificates. First, install `certbot` with `sudo apt install certbot` or follow the [instructions for your distribution](https://certbot.eff.org/instructions).\n\nAfter certbot is installed, issue a certificate to use with GitLab using a DNS challenge. Follow the steps to complete the DNS challenge after running this command:\n\n```bash\nsudo certbot certonly --manual --preferred-challenges dns\n```\n\nThe output will show you the specific location of the certificate it created (in my case, in a `gitpi.boleary.dev` folder), and you should link that certificate to GitLab's SSL directory by running:\n\n```bash\nsudo mkdir /etc/gitlab/ssl/\nsudo ln -s /etc/letsencrypt/live/gitpi.boleary.dev/fullchain.pem /etc/gitlab/ssl/gitpi.boleary.dev.crt\nsudo ln -s /etc/letsencrypt/live/gitpi.boleary.dev/privkey.pem /etc/gitlab/ssl/gitpi.boleary.dev.key\n```\n\nNext, configure GitLab to use the new certificate by opening `gitlab.rb` with\n\n```bash\nsudo vi /etc/gitlab/gitlab.rb\n```\n\nAnd change the `external_url` value to match the URL for the certificate (e.g. `https://gitpi.boleary.dev`). That \"https\" will tell GitLab to enable TLS/SSL and use your linked certificate.
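\n\nWith this example's hostname, the relevant line in `gitlab.rb` would look something like this (substitute your own domain; with an `https` URL, omnibus GitLab looks for a matching certificate and key under `/etc/gitlab/ssl/`, which we linked above):\n\n```ruby\n# /etc/gitlab/gitlab.rb\nexternal_url \"https://gitpi.boleary.dev\"\n```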
\n\n## Finishing up\n\nThat's it! Now with a simple `gitlab-ctl reconfigure`, GitLab will pick up the new certificate and start responding to requests at that URL. From any device - iOS, Android, laptop, etc. - connected to your Tailscale network, you can access your GitLab installation (securely) from anywhere!\n",[726,703,232],{"slug":2563,"featured":6,"template":678},"how-to-access-gitlab-on-a-private-network-with-tailscale","content:en-us:blog:how-to-access-gitlab-on-a-private-network-with-tailscale.yml","How To Access Gitlab On A Private Network With Tailscale","en-us/blog/how-to-access-gitlab-on-a-private-network-with-tailscale.yml","en-us/blog/how-to-access-gitlab-on-a-private-network-with-tailscale",{"_path":2569,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2570,"content":2576,"config":2580,"_id":2582,"_type":16,"title":2583,"_source":17,"_file":2584,"_stem":2585,"_extension":20},"/en-us/blog/quickly-onboarding-engineers-successfully",{"title":2571,"description":2572,"ogTitle":2571,"ogDescription":2572,"noIndex":6,"ogImage":2573,"ogUrl":2574,"ogSiteName":692,"ogType":693,"canonicalUrls":2574,"schema":2575},"How to quickly (and successfully) onboard engineers","It's a tough hiring market today. Here's how GitLab gets engineers onboard fast and sets them up for success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670635/Blog/Hero%20Images/kubernetesterms.jpg","https://about.gitlab.com/blog/quickly-onboarding-engineers-successfully","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to quickly (and successfully) onboard engineers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2022-07-21\",\n      }",{"title":2571,"description":2572,"authors":2577,"heroImage":2573,"date":2559,"body":2578,"category":14,"tags":2579},[1441],"\n\nNo one ever said hiring was easy. As a matter of fact, talent hiring and retention are some of the hardest aspects to get right for any software company. \n\nAccording to [a recent article at Developer Pitstop](https://developerpitstop.com/how-long-do-software-engineers-stay-at-a-job/), the average engineer stays at a job for only about two years before moving on, and this tenure is shrinking as time goes on. \n\nWhen we look at the average timeline for engineers in a new role we usually see something like:\n\n> - Learning and adaptation (3-6 months):\n>   Coming to grips with the new company, team, and their processes.\n> \n> - Creating value for the organization (6-12 months):\n>   Adding value to the business by becoming a functioning member of the team.\n> \n> - Becoming a role expert (6-18 months):\n>   Owning the role completely and helping to shape the direction of the team.\n\n## Software engineer onboarding\n\nAt GitLab we pride ourselves on an outstanding onboarding process to reduce the amount of time an engineer will spend in the `learning and adaptation` bracket and accelerate their evolution into the `creating value for the organization` bracket. We do this for two main reasons:\n\n- **Quicker integration**: We aim to have engineers ship production code in less than one week, and fully onboard them in less than three months.\n- **Reduce turnover**: Engineers who have an awesome onboarding experience tend to stay with the same company longer.\n\n**The bottom line is that with these benefits, investing in an amazing onboarding process gives you the highest ROI on your hiring initiatives.**\n\nSo, now that we know **why** we need to ensure we onboard quickly and correctly, let's talk about **how** we do it at GitLab. 
\n\n## Software engineer onboarding process: An overview\n\n- 💯 Before day one \n- 💥 It's all about the onboarding issue\n- 🥂 Pick the right onboarding buddy\n- 👌 Pair, pair, and more pairing\n- 🖐 All the coffee chats\n- 🤘 Tailor the experience to the role\n- 🚢 Ship some code in a week or less\n- 💬 Let's get (and give) some feedback\n\n![onboarding](https://about.gitlab.com/images/blogimages/onboarding.png){: .shadow}\n\n## 💯 Before day one\n\nThe best processes for onboarding software engineers start as soon as the candidate has officially accepted the offer. This is done in a few ways:\n\n- An onboarding issue is created with tasks for the hiring manager, their buddy, and People Experience (HR).\n- The hiring manager selects the right onboarding buddy for the engineer and communicates expectations (more on this later).\n- The engineer's accounts (email, GitLab account, Okta, etc.) are created and their hardware is shipped.\n- GitLab reaches out via email to let the candidate know what the onboarding process looks like.\n- The hiring manager reaches out to the engineer via email to set up a coffee chat on Day 1, as the initial process might seem overwhelming.\n\nFor us, the most important aspect is communication with the engineer to ensure they are set up for success. We provide them with access to their onboarding issue, helpful video guides for getting started, and a primer on how to navigate our [handbook like a pro](https://about.gitlab.com/handbook/people-group/general-onboarding/). The reason this is so important is that we know if we stop communicating with the engineer after signing, we are at risk of creating uncertainty, introducing inefficiency, or even losing them to another offer during that time.\n\n## 💥 It's all about the onboarding issue\n\nAt GitLab, our [onboarding issue](https://about.gitlab.com/handbook/people-group/general-onboarding/#onboarding-issue-template-links) is the most effective tool we have for successfully onboarding a new engineer quickly. Hiring managers use this issue almost exclusively, both for building momentum and for following our value of transparency. We use this issue, instead of Slack or email, to create a single source of truth for everyone during the process and to prevent fragmented communication. For anyone new at GitLab, the first few weeks can seem like a lot to get on top of, so the hiring manager wants to be mindful of opportunities to consolidate communication and reduce context switching. \n\nOur onboarding issues are confidential because they contain sensitive account information, but the templates of the issue are [public](https://about.gitlab.com/handbook/people-group/general-onboarding/#onboarding-issue-template-links) and they look something like this:\n\n```\n- Accounts and access\n- Day 1: Getting started: Accounts and paperwork\n- Day 2: Remote working and our values\n- Day 3: Security & compliance\n- Day 4: Social & benefits\n- Day 5: Git & push some code\n- Weeks 2 - 4: Explore\n- Job-specific tasks\n```\n\nAs a hiring manager, you want to ensure that you have fleshed out the `Job-specific tasks` ahead of time with things that are important for the specific role the engineer will be working in. This will generally include things like ensuring they have database access, pointing them to the working groups that will support their work, and letting them know the right Slack channels to support their development. 
\n\n## 🥂 Pick the right onboarding buddy\n\nThe advantages of the buddy system have been [well documented for years](https://www.pmi.org/learning/library/implementing-buddy-system-workplace-9376). At GitLab we lean heavily on the onboarding buddy [model](https://about.gitlab.com/handbook/people-group/general-onboarding/onboarding-buddies/) and, rather than having multiple people support the new engineer, it will generally be the hiring manager and a single buddy. \n\nAn onboarding buddy at GitLab brings several advantages:\n\n- **Domain expert**: An onboarding buddy knows the domain the new engineer is going to be working in. They have already written, reviewed, and merged code into production in the same way we want the new engineer to. They know the process, pitfalls, and gotchas of the domain. \n- **Single context / Accountabilibuddy**: A single onboarding buddy drastically reduces context switching and \"paralysis by analysis.\" The new engineer knows they always have someone to ask, and this creates a psychologically safe space for them. GitLab can often be a scary environment to navigate when you are new due to impostor syndrome, and we want to curb that. \n- **GitLabisms**: At GitLab, we have code and then we have \"GitLabisms.\" These are things that are specific to GitLab, be it workflows or custom tooling. These are often more complicated to become familiar with than the code itself. The onboarding buddy should have experience with these already and be able to point the engineer in the right direction when they are stuck. \n- **Mentor**: Mentoring is one of the single best things an engineer can do to grow themselves and become more sure of their own skills. By being an onboarding buddy, they are given a growth opportunity to cover their own blindspots and upskill. \n\nAs a rule of thumb, the onboarding buddy should ideally be someone from the engineer's new team who is working in the same domain, e.g. a senior frontend engineer mentoring a new intermediate frontend engineer, both of whom are on the same team. While this rule is not set in stone, it is often less effective to have an onboarding buddy be cross-team due to a lack of domain expertise.\n\n## 👌 Pair, pair, and more pairing\n\nPairing when programming and when working on tasks is a very effective way to help new engineers build up their knowledge without needing to pore over documentation. \n\nIn general, we would recommend that the engineer pair with their onboarding buddy on their first few merge requests to get used to the workflow and pitfalls of working with the GitLab Development Kit. But this is not where it should stop. We encourage pairing across the board at GitLab, either via open pairing sessions such as our Frontend Pairing office hours, having a manager pair with an engineer, or pairing with a stable counterpart such as your team's UX designer. \n\nWhen it comes to onboarding, pairing is helpful. We do this because we want to:\n\n- **Create psychological safety**: We all feel impostor syndrome. This is worse when you're new to a job and don't know the ecosystem yet. Regular pairing helps to undo that worry as you see people are just people and even staff/principal engineers forget the closing brace!\n- **Create relationships/network**: In an all-remote company, it becomes important to know who to reach out to in moments of need. Regular pairing helps to foster these relationships and creates a safety net with your peers. 
\n- **Demonstrate our values**: We believe in [CREDIT](https://handbook.gitlab.com/handbook/values/) at GitLab. Regular pairing supports all our core values and helps to encourage us to be mindful of them when working. \n- **Give and get real-time feedback**: When pairing, we can get real-time feedback on our process and how we're approaching solutions. This is extremely important for new engineers who might not be familiar with core GitLab concepts such as [iteration](https://handbook.gitlab.com/handbook/values/#iteration) (\"How can we break this down?\").\n\n## 🖐 All the coffee chats\n\nBeing distributed means we do communication differently at GitLab. One key to successfully onboarding a new engineer is to get them comfortable with our communication style. \n\nTo do this, we encourage regular [coffee chats](https://about.gitlab.com/company/culture/all-remote/informal-communication/#coffee-chats) and a culture of zero shame about it. \n\nEncourage your new hire to set up regular coffee chats with people across the company to help build rapport and become comfortable with GitLab as a whole. \n\nTo help empower new hires, have them ask the following question in their initial 10-15 chats:\n\n> What is the one thing I can do to be successful at GitLab?\n\n## 🤘 Tailor the experience to the role\n\nAs a hiring manager, you need to understand that people learn and grow in different ways. No single method will work for everyone, and it is your job to ensure your new hire feels supported in how they want to learn. \n\nDuring the onboarding, observe your new hire and touch base with them in your weekly 1:1 about what they are and **are not** enjoying about the experience so far. Once you have this information, iterate on it and tailor their onboarding to include more of what they prefer. \n\nAsk constructive questions that can have actionable tasks each week to ensure a better process for them:\n\n> Do you want to pair more? Do you want more alone time? Are there particular areas you need more guidance in? Are there things I can do to better support you?\n\nYou should aim to strike a balance during their onboarding between practical work and time dedicated to studying. Work with the direct report to establish the best balance for them as an individual. \n\n## 🚢 Ship some code in a week or less\n\nThis is arguably the most important aspect of successfully onboarding an engineer and setting them up for success. The sooner they can push code to production, the sooner they can begin to refine their skills and work effectively with the team. \n\nThe best software companies in the world set a timeline of shipping code in a week. At GitLab, this is not a hard-and-fast rule, but in the **Create** stage it is what we strive for. \n\nTo ensure an engineer can ship code within a week, we need to ensure they are supported in a few ways:\n\n- **Tooling**: At GitLab we have a fantastic [local development kit](https://gitlab.com/gitlab-org/gitlab-development-kit) which sets up an engineer to begin delivering code. We support this kit heavily as a first-class citizen and are constantly refining the tooling and [docs](https://gitlab.com/gitlab-org/gitlab-development-kit/-/tree/main/doc) to ensure everyone can contribute. For a new hire, consider making their first pairing session about setting up their GDK – this will get them one step closer to shipping quality code. 
\n- **Dev process**: At GitLab, we always strive to [break down work into its smallest deliverable](https://about.gitlab.com/handbook/product/product-principles/#the-minimal-viable-change-mvc) that can be picked up by an engineer without deep contextual understanding. We do this to support the open source community as much as our own engineers. \n\n## 💬 Let's get some feedback\n\nAs a hiring manager, you want to ensure you build a stable feedback loop into your processes, and this includes onboarding. During your 1:1s you should include a weekly feedback cycle for both **you** and your direct report. \n\nThese feedback cycles should take the form of:\n\n- **Appreciation (Collaboration / Results / Diversity / Iteration / Transparency)**: A moment of appreciation for something positive that is highlighted in line with our values. \n- **Coaching (Collaboration / Results / Diversity / Iteration / Transparency)**: A growth opportunity that is highlighted in line with our values. \n\nThese weekly feedback loops allow the engineer to highlight things that could be done better in both the context of the onboarding and their day-to-day experience. \n\nLastly, it is optional but encouraged to hold an onboarding retrospective when the initial onboarding issue is closed, with the following points to talk through:\n\n- What went well?\n- What didn't go so well? \n- What could be improved? \n- Action items\n\n## 💾 TL;DR \n\nThe most successful software companies have a solidified onboarding process and continue to expand on it, setting up both the company and engineers for long-term success. The above methods are how we do it at GitLab. \n\n## 💻 Remote development and the developer experience\n\nAt GitLab we have recently been hiring for our [Remote Development effort](https://about.gitlab.com/direction/create/ide/remote_development/) and many of these items are in play with the engineers we are bringing into the company. We want to improve these processes to make onboarding even easier, removing the need to even set up a specific local development toolchain to be able to ship production code. \n\nIf you think you might be interested in a role at GitLab working on Remote Development, check out our open listings [here](https://boards.greenhouse.io/gitlab/jobs/6201785002).\n\nRead more about [leading engineering teams](/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers/).\n",[894,1328,1347],{"slug":2581,"featured":6,"template":678},"quickly-onboarding-engineers-successfully","content:en-us:blog:quickly-onboarding-engineers-successfully.yml","Quickly Onboarding Engineers Successfully","en-us/blog/quickly-onboarding-engineers-successfully.yml","en-us/blog/quickly-onboarding-engineers-successfully",{"_path":2587,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2588,"content":2594,"config":2600,"_id":2602,"_type":16,"title":2603,"_source":17,"_file":2604,"_stem":2605,"_extension":20},"/en-us/blog/reducing-pager-fatigue-and-improving-on-call-life",{"title":2589,"description":2590,"ogTitle":2589,"ogDescription":2590,"noIndex":6,"ogImage":2591,"ogUrl":2592,"ogSiteName":692,"ogType":693,"canonicalUrls":2592,"schema":2593},"How we improved on-call life by reducing pager noise","Too many pages? 
Here's how we tackled on-call SRE quality of life by grouping alerts by service and only paging on downstream services.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682368/Blog/Hero%20Images/cover.png","https://about.gitlab.com/blog/reducing-pager-fatigue-and-improving-on-call-life","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we improved on-call life by reducing pager noise\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steve Azzopardi\"}],\n        \"datePublished\": \"2022-07-19\",\n      }",{"title":2589,"description":2590,"authors":2595,"heroImage":2591,"date":2597,"body":2598,"category":14,"tags":2599},[2596],"Steve Azzopardi","2022-07-19","\n\nTo monitor the health of GitLab.com we use multiple\n[SLIs](https://sre.google/workbook/implementing-slos/#what-to-measure-using-slis)\nfor each service. We then page the on-call when one of these SLIs is not\nmeeting our internal [SLOs and burning through the error\nbudget](https://sre.google/workbook/implementing-slos/#decision-making-using-slos-and-error-budgets)\nwith the hope of fixing the problem before too many of our users even notice.\n\nAll of our services' SLIs and SLOs are defined using [jsonnet](https://jsonnet.org/) in\nwhat we call the [metrics-catalog](https://gitlab.com/gitlab-com/runbooks/-/tree/168397dee0ad955bfb473fd0ddb9146667eeaa13/metrics-catalog)\nwhere we specify a service and its SLIs/SLOs. For example, the [`web-pages`](https://gitlab.com/gitlab-com/runbooks/-/blob/168397dee0ad955bfb473fd0ddb9146667eeaa13/metrics-catalog/services/web-pages.jsonnet)\nservice has [an apdex SLO of 99.5%](https://gitlab.com/gitlab-com/runbooks/-/blob/168397dee0ad955bfb473fd0ddb9146667eeaa13/metrics-catalog/services/web-pages.jsonnet#L22)\nand multiple SLIs such as [loadbalancer](https://gitlab.com/gitlab-com/runbooks/-/blob/168397dee0ad955bfb473fd0ddb9146667eeaa13/metrics-catalog/services/web-pages.jsonnet#L60),\n[go server](https://gitlab.com/gitlab-com/runbooks/-/blob/168397dee0ad955bfb473fd0ddb9146667eeaa13/metrics-catalog/services/web-pages.jsonnet#L80),\nand [time to write HTTP headers](https://gitlab.com/gitlab-com/runbooks/-/blob/168397dee0ad955bfb473fd0ddb9146667eeaa13/metrics-catalog/services/web-pages.jsonnet#L113).\nBecause these are defined in code, we can automatically generate Prometheus [recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/)\nand [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)\nfollowing [multiple burn rate alerts](https://sre.google/workbook/alerting-on-slos/#5-multiple-burn-rate-alerts).\nEvery time we start burning through our 30-day error budget for an SLI too fast\nwe page the SRE on-call to investigate and solve the problem.\n\nThis setup has been working well for us for over two years now, but one big\npain point remained when there was a service-wide degradation. The SRE on-call\nwas getting paged **for every SLI** associated with a service or its\ndownstream dependencies, meaning they could get up to 10 pages per service, since\na service has 3-5 SLIs on average and we also have regional and canary SLIs.\nThis is very distracting and stress-inducing, and it keeps the\non-call busy acknowledging pages instead of focusing on solving the problem. 
For example,\nbelow we can see the on-call getting paged 11 times in 5 minutes for the same\nservice.\n\n![web-pages alert storm](https://about.gitlab.com/images/blogimages/reducing-pager-fatigue/web-pages-alert-storm.png){: .shadow}\n\nWhat is even worse is when we have a site-wide outage, where the on-call can\nend up getting 50+ pages because all services are in a degraded state.\n\n![site wide outage alert storm](https://about.gitlab.com/images/blogimages/reducing-pager-fatigue/site-wide-outage-alert-storm.png)\n\nThis was a big problem for the on-call's quality of life and we needed to\nfix it. We started doing some research on how to best solve this problem and\nopened an issue to [document all possible\nsolutions](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15721).\nAfter some time we decided to go with grouping alerts by service and\nintroducing service dependencies for alerting/paging.\n\n## Group alerts by service\n\nThe smallest and most effective iteration was to group the alerts by\nservice. Taking the previous example where the `web-pages` service paged the\non-call 11 times, it should have only paged the on-call once, and shown\nwhich SLIs were affected. We use [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) for\nall our alerting logic, and it already had a feature called\n[grouping](https://prometheus.io/docs/alerting/latest/alertmanager/#grouping)\nthat lets us group alerts by labels.\n\nThis is what an alert looks like in our Prometheus setup:\n```\nALERTS{aggregation=\"regional_component\", alert_class=\"slo_violation\", alert_type=\"symptom\", alertname=\"WebPagesServiceServerApdexSLOViolationRegional\", alertstate=\"firing\", component=\"server\", env=\"gprd\", environment=\"gprd\", feature_category=\"pages\", monitor=\"global\", pager=\"pagerduty\", region=\"us-east1-d\", rules_domain=\"general\", severity=\"s2\", sli_type=\"apdex\", slo_alert=\"yes\", stage=\"main\", tier=\"sv\", type=\"web-pages\", user_impacting=\"yes\", window=\"1h\"}\n```\n\nAll alerts have the `type` label attached to them to specify which service they\nbelong to. We can use this label and the `env` label to group all the\nproduction alerts that are firing for the `web-pages` service.\n\n![grouping alerts by the `type` and `env` labels](https://about.gitlab.com/images/blogimages/reducing-pager-fatigue/group-alerts-by-service.png)\n\nWe also had to update our Pagerduty and Slack templates to show the right\ninformation. Previously we only showed the alert title and description, but this had\nto change since we now alert by service rather than by one specific SLO.\nYou can see the changes at [runbooks!4684](https://gitlab.com/gitlab-com/runbooks/-/merge_requests/4684).\n\n![Before and after on pages](https://about.gitlab.com/images/blogimages/reducing-pager-fatigue/before-after-slack-alerts.png)\n\nThis was already a big win! The on-call now gets a page saying \"service\nweb-pages\" and then the list of SLIs that are burning through the error budget - we went from 11 pages to 1 page!
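\n\nIn Alertmanager terms, the grouping boils down to a route configuration along these lines (a simplified sketch, not our exact production config):\n\n```yaml\nroute:\n  receiver: pagerduty\n  # Collapse all firing alerts that share a service (`type`) and an\n  # environment (`env`) into a single notification.\n  group_by: ['type', 'env']\n  group_wait: 30s\n  group_interval: 5m\n```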
\n\n## Service Dependencies\n\nHowever, we still had the problem that when a downstream service (such as the database)\nstarts burning through the error budget, it has a cascading effect where `web`,\n`git`, and `api` will also start burning through the error budget and page the\non-call for each service. That was the next thing that we had to solve.\n\nWe needed some way to not alert on the `api` service if the `patroni`\n(database) service was burning through the error budget, because it's clear that if the\ndatabase is degraded, the `api` service will end up degraded as well. We used\nanother feature of Alertmanager called\n[inhibition](https://prometheus.io/docs/alerting/latest/alertmanager/#inhibition)\nwhere we can tell Alertmanager to not alert on `api` if some alerts on `patroni`\nare already firing.\n\n![visualization of how inhibit rules work](https://about.gitlab.com/images/blogimages/reducing-pager-fatigue/inhibit-alerts.png)\n\nI've mentioned that all of our SLIs/SLOs are inside of the\n[metrics-catalog](https://gitlab.com/gitlab-com/runbooks/-/tree/168397dee0ad955bfb473fd0ddb9146667eeaa13/metrics-catalog)\nso it was a natural fit to add dependencies there, and this is exactly what\nwe did in [runbooks!4710](https://gitlab.com/gitlab-com/runbooks/-/merge_requests/4710). With this\nwe can specify that an SLI depends on an SLI of a different service, which\nautomatically creates\n[`inhibit_rules`](https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule)\nfor Alertmanager.\n\nSince inhibit rules could potentially prevent alerting someone, we've used\nthem sparingly. To avoid creating inhibit rules too broadly, we've implemented\nthe following restrictions:\n1. An SLI can't depend on an SLI of the same service.\n1. The SLI has to exist for that service.\n1. We only allow equal operations, no regex on SLIs.\n\nAfter that, it was only a matter of adding `dependsOn` to each service, for example:\n1. [`web` depends on `patroni`](https://gitlab.com/gitlab-com/runbooks/-/merge_requests/4735)\n1. [`api` depends on `patroni`](https://gitlab.com/gitlab-com/runbooks/-/merge_requests/4750)\n1. [`web-pages` depends on `api`](https://gitlab.com/gitlab-com/runbooks/-/merge_requests/4813)\n\nThe `web-pages` inhibit rule shows a chain of dependencies from `web-pages ->\napi -> patroni`, so if `patroni` is burning through the error budget it will\nno longer page for the `api` and `web-pages` services!\n\n## How it's working\n\nWe have been using alert grouping and service dependencies for over a month now, and we have already seen some improvements:\n\n1. The on-call only gets paged once per service.\n1. When there is a large site-wide outage they only get paged 5-10 times since we have external probes that also alert us.\n1. 
There is an overall downward trend on pages for the on-call as seen below.\n\n![pages trend](https://about.gitlab.com/images/blogimages/reducing-pager-fatigue/pages-trend.png)\n\nCover image by [Yaoqi](https://unsplash.com/@yaoqiqiqilai) on [Unsplash](https://unsplash.com/photos/7iatBuqFvY0)\n{: .note}\n",[1286],{"slug":2601,"featured":6,"template":678},"reducing-pager-fatigue-and-improving-on-call-life","content:en-us:blog:reducing-pager-fatigue-and-improving-on-call-life.yml","Reducing Pager Fatigue And Improving On Call Life","en-us/blog/reducing-pager-fatigue-and-improving-on-call-life.yml","en-us/blog/reducing-pager-fatigue-and-improving-on-call-life",{"_path":2607,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2608,"content":2614,"config":2620,"_id":2622,"_type":16,"title":2623,"_source":17,"_file":2624,"_stem":2625,"_extension":20},"/en-us/blog/tips-for-managing-monorepos-in-gitlab",{"title":2609,"description":2610,"ogTitle":2609,"ogDescription":2610,"noIndex":6,"ogImage":2611,"ogUrl":2612,"ogSiteName":692,"ogType":693,"canonicalUrls":2612,"schema":2613},"5 Tips for managing monorepos in GitLab","Learn the benefits of operating a monolithic repository and how to get the most out of this structure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667591/Blog/Hero%20Images/code-review-blog.jpg","https://about.gitlab.com/blog/tips-for-managing-monorepos-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Tips for managing monorepos in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Waldner\"}],\n        \"datePublished\": \"2022-07-12\",\n      }",{"title":2609,"description":2610,"authors":2615,"heroImage":2611,"date":2617,"body":2618,"category":14,"tags":2619},[2616],"Sarah Waldner","2022-07-12","\nGitLab was founded 10 years ago on Git because it is the market-leading version control system. As [Marc Andreessen pointed out in 2011](https://www.wsj.com/articles/SB10001424053111903480904576512250915629460), we see teams and code bases expanding at incredible rates, testing the limits of Git. Organizations working on enormous or monolithic repositories are experiencing significant performance slowdowns and added administration complexity. \n\n## Why do organizations develop on monorepos? \n\nGreat question. While [some](https://www.infoworld.com/article/3638860/the-case-against-monorepos.html) might believe that monorepos are a no-no, there are valid reasons why companies, including Google and GitLab (that’s right! We operate a monolithic repository), choose to do so. The main benefits are: \n\n- Monorepos can reduce silos between teams, streamlining collaboration on design, development, and operation of different services because everything is within the same repository.\n- Monorepos help organizations standardize on tooling and processes. If a company is pursuing a DevOps transformation, a monorepo can help accelerate change management when it comes to new workflows or the rollout of new tools.\n- Monorepos simplify dependency management because all packages can be updated in a single commit.\n- Monorepos offer unified CI/CD and build processes. 
Having all services in a single repository means that you can set up one system of pipelines for everyone.\n\nWhile we still have a ways to go before monorepos or monolithic repositories are as easy to manage as multi-repos in GitLab, we put together five tips and tricks to maintain velocity while developing on a monorepo in GitLab.\n\n**1. Use CODEOWNERS to streamline merge request approvals**\n\nCODEOWNERS files live in the repository and assign an owner to a portion of the code, making it super efficient to process changes. Investing time in setting up a robust [CODEOWNERS file](https://docs.gitlab.com/ee/user/project/codeowners/) that you can then use to automate [merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) from required people will save time down the road for developers. \n\nYou can then set your merge requests so they must be approved by Code Owners before merge. The Code Owners specified for the changed files in the merge request will be automatically notified.\n\n**2. Improve Git operation performance with Git LFS**\n\nA universal truth of Git is that managing large files is challenging. If you work in the gaming industry, I am sure you’ve been through the annoying process of trying to remove a binary file from the repository history after a well-meaning coworker committed it. This is where [Git LFS](https://docs.gitlab.com/ee/topics/git/lfs/#git-large-file-storage-lfs) comes in. Git LFS keeps all the big files in a different location so that they do not exponentially increase the size of a repository.\n\nThe GitLab server communicates with the Git LFS client over HTTPS. You can enable Git LFS for a project by toggling it in [project settings](https://docs.gitlab.com/ee/user/project/settings/index.html#configure-project-visibility-features-and-permissions). All files in Git LFS can be tracked in the GitLab interface. GitLab indicates which files are stored there with the LFS icon.\n\n**3. Reduce download time with partial clone operations**\n\n[Partial clone](https://docs.gitlab.com/ee/topics/git/partial_clone.html#partial-clone) is a performance optimization that allows Git to function without having a complete copy of the repository. The goal of this work is to allow Git to better handle extremely large repositories.\n\nAs we just talked about, storing large binary files in Git is normally discouraged because every large file added is downloaded by everyone who clones or fetches changes thereafter. These downloads are slow and problematic, especially when working from a slow or unreliable internet connection.\n\nUsing partial clone with a file size filter solves this problem by excluding troublesome large files from clones and fetches. \n\n**4. Take advantage of parent-child pipelines**\n\n[Parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) are where one pipeline triggers a set of downstream pipelines in the same project. The downstream child pipelines execute independently, without waiting for the other pipelines to finish. Additionally, child pipelines keep configuration scoped to just what the child needs, making it easier to interpret and understand. For monorepos, using parent-child pipelines in conjunction with `rules:changes` will only run child pipelines when the specified files change. This reduces wasted time running pipelines across the entire repository, as the sketch below shows.\n\n
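A minimal parent pipeline illustrating this pattern could look like the following (a sketch - the `app1` paths are illustrative, not from a specific project):\n\n```\n# .gitlab-ci.yml (parent) - trigger a child pipeline only when app1's files change\ntrigger-app1:\n  trigger:\n    include: app1/.gitlab-ci.yml   # child pipeline config living next to the component\n  rules:\n    - changes:\n        - app1/**/*\n```\n\n**5. 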
Use incremental backups to eliminate downtime** \n\n[Incremental backups](https://docs.gitlab.com/ee/raketasks/backup_restore.html#incremental-repository-backups) can be faster than full backups because they only pack changes since the last backup into the backup bundle for each repository. This is super useful when you are working on a large repository and only developing on certain parts of the code base at a time.\n\n## Where we are headed\n\nWhile these tips have helped many customers migrate from other version control systems to GitLab, we know there is still room for improvement. Over the next year, you will see us working on the following projects. We’d LOVE to hear from you, so share your thoughts, ideas, or simply 👍 on an issue to help prioritize things that will make your life easier.\n\n- [Git for enormous repositories](https://gitlab.com/groups/gitlab-org/-/epics/773)\n- [Expand SAST scanner support for monorepos](https://gitlab.com/groups/gitlab-org/-/epics/4895)\n- [Allow Reports to be Namespace to support monorepos](https://gitlab.com/gitlab-org/gitlab/-/issues/299490)\n",[894,749,704,675,727],{"slug":2621,"featured":6,"template":678},"tips-for-managing-monorepos-in-gitlab","content:en-us:blog:tips-for-managing-monorepos-in-gitlab.yml","Tips For Managing Monorepos In Gitlab","en-us/blog/tips-for-managing-monorepos-in-gitlab.yml","en-us/blog/tips-for-managing-monorepos-in-gitlab",{"_path":2627,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2628,"content":2634,"config":2641,"_id":2643,"_type":16,"title":2644,"_source":17,"_file":2645,"_stem":2646,"_extension":20},"/en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two",{"title":2629,"description":2630,"ogTitle":2629,"ogDescription":2630,"noIndex":6,"ogImage":2631,"ogUrl":2632,"ogSiteName":692,"ogType":693,"canonicalUrls":2632,"schema":2633},"Bamboo Server to GitLab CI migration: Advanced techniques","A real-world look at how a migrated CI/CD infrastructure will work in GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679081/Blog/Hero%20Images/jenkins-migration.jpg","https://about.gitlab.com/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate Atlassian's Bamboo server's CI/CD infrastructure to GitLab CI, part two\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Lychev\"}],\n        \"datePublished\": \"2022-07-11\",\n      }",{"title":2635,"description":2630,"authors":2636,"heroImage":2631,"date":2638,"body":2639,"category":14,"tags":2640},"How to migrate Atlassian's Bamboo server's CI/CD infrastructure to GitLab CI, part two",[2637],"Ivan Lychev","2022-07-11","\nIn [part one of our series](/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci/), I showed you how to migrate from Atlassian’s Bamboo Server to GitLab CI/CD. In this blog post we’re going to take a deep dive into how it works from a user’s perspective.\n\n## Get started\n\nYou’ve deployed the demo so it’s time to play with it to understand how it works.\n\nLet's imagine that one of the members of our project is John Doe. He is a software engineer responsible for developing some components (app1, app2, and app3) of the entire product, and he and his team would like to test those components in several combinations in myriad preview environments. 
So, what does that look like?\n\nFirst of all, let’s make some commits to the app1, app2, and app3 source code and get successful builds upon those commits.\n\nAfter that, we should create releases for those apps to be able to deploy them (the deployment part of the apps’ CI config only appears when triggered by a Git tag, i.e., a GitLab release). A release can be created by launching the last step (`manual-create-release`) in a commit pipeline. That gives us a new release with an unwieldy name containing the date and commit SHA in the patch part (in accordance with the `semver` scheme):\n\n![app_gitlab_release](https://about.gitlab.com/images/blogimages/app_gitlab_release.png)\n\nOn the `Tags` tab for the same app you can now see that the deployment part of the pipeline has been triggered by the newly created GitLab release, but no actual environments to deploy are displayed (the `_` item in the `Deploy-nonprod` stage is not an env):\n\n![absent_envs](https://about.gitlab.com/images/blogimages/absent_envs.png)\n\n## Create an environment\n\nBut before that, we have to briefly switch to another team, the one responsible for preparing infrastructure IaC templates. Navigate to the `infra/environment-blueprints` project and pretend you are a member of that team doing their job. Namely, imagine you have just created some initial set of IaC files (they are already kindly prepared by me and present in the repository). You’ve tested them and now you feel that they are ready to be used by the other members of the project. You indicate that a particular version of the IaC files is ready by giving it a Git tag. Let’s put a tag like `v1.0.0` onto the HEAD version.\n\nYou will see shortly how the tags are used. But first let's make some changes to the IaC files (e.g., add a new resource for some of the apps) and create a second Git tag, let's say `v1.1.0`. So, at this moment we have two versions of IaC templates (or `blueprints`) for our infrastructure - `v1.0.0` and `v1.1.0`.\n\n## Deploy an app into the environment\n\nNow we can return to John and his team. We assume John is somehow informed that the version of the IaC templates he should use is `v1.0.0`. He wants to create a new preview environment out of the IaC templates of that version and put app1 and app2 into that env. \n\n(Here starts a description of how a user interacts with the `environment-set` Git repo. Note that the eventual idea is a Merge Request workflow – where you first get a Terraform plan within a Merge Request and apply that plan by merging the MR – which is widely advocated by GitLab; for the sake of simplicity, however, the MR workflow is not implemented here and commits are pushed directly into a branch instead.)\n\nJohn wants the env to be named `preview-for-johns-team`. He creates a new branch in the `environment-set` repo with that name and puts two files into it: a `version.txt` containing the text `v1.0.0` and an `apps.txt` with the text `app1 app2` inside (the file format and content are deliberately simplified). \n\nThe `environment-set` pipeline is triggered by the new branch and first generates a Terraform plan using the set of Terraform files indicated by the tag specified in `version.txt`. 
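For illustration, the resolution step inside that plan job could look something like the following (a hypothetical sketch, not the demo’s literal job definition - the URL is invented, and the real shared configs live in the `ci/library` repo):\n\n```\n# Hypothetical plan-job steps: check out the blueprints version pinned in version.txt\nBLUEPRINT_VERSION=$(cat version.txt)   # e.g. v1.0.0\ngit clone --depth 1 --branch \"$BLUEPRINT_VERSION\" \\\n  https://gitlab.example.com/infra/environment-blueprints.git blueprints\nterraform -chdir=blueprints init\nterraform -chdir=blueprints plan -out=plan.tfplan\n```\n\n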
John reviews the plan and wants to proceed with creating the environment by starting the `Terraform-apply` stage:\n\n![new_env_pipeline](https://about.gitlab.com/images/blogimages/new_env_pipeline.png)\n\n(To store the Terraform plan as an artifact and to keep the Terraform state, the built-in features of GitLab are leveraged - the [Package Registry](https://docs.gitlab.com/ee/user/packages/package_registry/) and the [Terraform HTTP backend provided by GitLab](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html).)\n\nNow return to the `app1` project and rerun the pipeline for the app1 release we created previously to make it regenerate the list of environments to deploy. You should see that the `preview-for-johns-team` item has appeared in the list of environments:\n\n![new_env_in_the_deploy_pipeline](https://about.gitlab.com/images/blogimages/new_env_in_the_deploy_pipeline.png)\n\nClick the arrow button to deploy. Then refer to the `Deployments/Environments` section of the `app1` project to confirm that a new env with the app1 release deployed into it is displayed.\n\nWe have successfully created a new environment and deployed one of the apps into it!\n\nThe above describes users manually deploying applications into an environment after it has been created, which isn’t very convenient. In a real-life scenario, we would most likely have an additional step in the `environment-set` pipeline that runs after Terraform successfully finishes creating an environment and triggers deployment pipelines for all the applications specified in `apps.txt`. In that situation, we would need to establish which versions of the applications should be deployed in such an automated manner - for example, those might be the latest versions available for each app, or the versions currently deployed to production, etc.\n\n## Update an environment's infrastructure\n\nJohn is notified that a new version of the infrastructure templates is available (you remember that `v1.1.0` tag in the `environment-blueprints` repo?). His team wants to assess how app1 would work under the new conditions. They decide to update an existing env, namely `preview-for-johns-team`, for that purpose. \n\nJohn switches to the `preview-for-johns-team` branch of the `environment-set` repo and changes `version.txt`'s content from `v1.0.0` to `v1.1.0`. The branch pipeline gets triggered and first shows John a Terraform plan with the diff against the current state of the environment. After reviewing and accepting that diff, John proceeds with actually updating the environment by launching the `Terraform-apply` stage. That's it!\n\n## Advantages and disadvantages\n\n### Virtues\n\nGiven that this case assumes migrating from an existing CI/CD infrastructure based on Atlassian Bamboo, with a lot of users who are familiar with it, the proposed solution leverages the native capabilities of GitLab so that it mostly keeps the concepts and workflows used with Bamboo. This strategy makes the process of migration smoother for the users.\n\nThe solution sticks to the GitOps tenets and empowers a project with all the virtues provided by Git. For example, it's usually easy to track any changes in the infrastructure back to Git repos. 
(It may not be so easy for the `environment-set` project, where we do not have the infrastructure changes captured in Git commits; but in that case, the task of finding the differences between two states of a particular environment can be accomplished by fetching the two versions of the `environment-blueprints` repo corresponding to the states denoted in `version.txt` and figuring out the differences with any suitable tool.)\n\nThe solution supports user self-service: most of the tasks of changing the infrastructure can be performed by anyone familiar with just the basics of Git and Terraform. As a result, it offloads part of the work from the DevOps team and removes dependence on the Ops department, which comes in really handy, especially for large-scale projects.\n\n### Shortcomings\n\nBesides the deficits mentioned above, which stem from the need to heavily simplify every aspect of this demo to keep it comprehensible and possible to prepare in a sensible amount of time, this solution has some shortcomings that would have to be resolved with external tools to make it appropriate for real-life usage.\n\nFor example, there is no way to have a central dashboard with an aggregated view of all the environments and all the apps and versions deployed into them. This would require creating a custom SPA web app that gathers information from GitLab via the API.\n",[110,894,726],{"slug":2642,"featured":6,"template":678},"how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two","content:en-us:blog:how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two.yml","How To Migrate Atlassians Bamboo Servers Ci Cd Infrastructure To Gitlab Ci Part Two","en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two.yml","en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two",{"_path":2648,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2649,"content":2654,"config":2660,"_id":2662,"_type":16,"title":2663,"_source":17,"_file":2664,"_stem":2665,"_extension":20},"/en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci",{"title":2650,"description":2651,"ogTitle":2650,"ogDescription":2651,"noIndex":6,"ogImage":2478,"ogUrl":2652,"ogSiteName":692,"ogType":693,"canonicalUrls":2652,"schema":2653},"Migrating from Bamboo Server to GitLab CI: Getting started","Theoretical reasoning and practical proposal on migrating an existing CI/CD infrastructure of some multi-component application from Bamboo Server to GitLab CI","https://about.gitlab.com/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate Atlassian Bamboo Server's CI/CD infrastructure to GitLab CI, part one\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Lychev\"}],\n        \"datePublished\": \"2022-07-06\",\n      }",{"title":2655,"description":2651,"authors":2656,"heroImage":2478,"date":2657,"body":2658,"category":14,"tags":2659},"How to migrate Atlassian Bamboo Server's CI/CD infrastructure to GitLab CI, part one",[2637],"2022-07-06","\n\nWhen I faced the task of migrating from `Atlassian Bamboo Server` to `GitLab CI/CD`, I was not able to find any comprehensive information about anything similar, so I designed a process on my own. 
This demo shows how to migrate a CI/CD structure for an existing multi-component application from the discontinued [Atlassian Bamboo Server](https://www.atlassian.com/migration/assess/journey-to-cloud) to [GitLab CI/CD](https://docs.gitlab.com/ee/index.html) (Community Edition).\n\nThe accompanying repository is https://gitlab.com/iLychevAD/ci-cd-for-a-multi-component-app.\n\nIn this first part of a two-part series, you will find a description of the current state of affairs - i.e., how the CI/CD has been organized within Bamboo Server, how the Bamboo Build and Deploy plans are designed for bootstrapping infrastructure and deploying the components of the application, and the architecture of the application itself.\n\nAnd in part two, we'll take a deeper look at the virtues of `GitLab CI/CD`.\n\n## Initial state\n\n(Note: This is not a description of one particular project, but rather a compilation of several projects I worked on.)\n\nThe application solution allows the client to fulfill a particular business purpose (the nature of which is not relevant here and thus not specified) and consists of more than 50 discrete components (further referred to as `applications` or just `apps` or `components`). I refrain from calling them microservices, as each of them looks more like a full-fledged application communicating with its siblings using REST APIs and messages in Kafka topics. Some of them expose a web UI to external or internal users and some are just utility parts serving the needs of other components or performing internal operations, etc.\n\nCode for each app is stored in its own Git repository (further just `repo`). So, a `multi-repo` approach is used for them. Each app may be written in a different language and packaged as one or several OCI images for deployment.\n\nEach app repo looks like:\n```\n📦 \u003Csome-app-git-repo>\n ┣ 📂src \u003C-- application source code\n ┣ 📂docker-compose\n ┃ ┗ 📜docker-compose.yml \u003C-- analogue of K8s manifests\n ┗ 📜Dockerfile \u003C-- conventionally, \"Dockerfile\" name is used for OCI image specification file\n```\n\nFor running the applications, the client uses an outdated orchestration system (one from the pre-Kubernetes epoch). So each app repo contains a Docker-compose-compatible file describing deployment directives for that outdated orchestration system (in essence, similar to Kubernetes Deployment manifests). \n\nAtlassian Bamboo Server is used for all of the build and deploy activities. \n\nSome details for those not familiar with Bamboo Server: in an opinionated manner, it explicitly separates so-called `build` pipelines from `deployment` pipelines. The former are supposed to build application code and produce some artifacts for further deployment (in our case, those artifacts are OCI images uploaded to an OCI registry and docker-compose.yml files referring to those images). The latter are supposed to take a particular set of artifacts and apply them to some particular `environment`. An `environment` (referred to as `env` below for brevity) here is just an abstract deployment target characterized by a set of environment variables attached to it and exposed to the apps deployed into it. In reality, an `env` is implemented as a set of resources (virtual machines, databases, object storage locations, etc.) required by the applications.\n\nIn Bamboo, one `build` pipeline usually corresponds to one `deployment` pipeline, so when the latter is started, it just takes the artifacts from the attached `build` pipeline as input. 
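To make the artifact side concrete, the `docker-compose.yml` that a `build` plan produces and a `deployment` plan applies might look roughly like the following (an invented sketch - real files carry many more directives, and the image name and variables are illustrative):\n\n```\n# docker-compose/docker-compose.yml - illustrative sketch only\nversion: \"3\"\nservices:\n  some-app:\n    image: registry.example.com/some-app:1.2.3   # OCI image produced by the build pipeline\n    environment:\n      - DB_URL=${DB_URL}                         # injected from the target env's variable set\n    ports:\n      - \"8080:8080\"\n```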
\n\nThe client uses a `production` env, a `preproduction` env, and numerous (up to several hundred) so-called `staging` (short-lived) envs where different development teams and software engineers can test various combinations of the apps (here we assume that they have ~80-100 distinct components of the application solution and several hundred software developers, which gives a lot of possible combinations and hence requires so many `staging` envs).\n\nRoughly, a configuration of a `deploy` pipeline consists of a specification of the source artifacts (which are provided by the attached `build` pipeline, as described earlier) and a specification of the set of envs to which those artifacts (effectively, an application) can be deployed.\n\nThe current installation uses sophisticated dynamic generation of the env set for each app's deployment pipeline. Roughly speaking, they have a central configuration file with the list of all existing envs, where for each env a list of apps allowed to be deployed to it is denoted. Each time the file is modified (i.e., an env is created or deleted), the deployment pipelines are automatically updated so that, as a result, each of them contains the list of envs applicable to its app. This aspect will become clearer when you have looked at the implementation section later.\n\nIn the Bamboo UI this looks like:\n\n![envs_list_on_build_result_page](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/envs_list_on_build_result_page.png)\n\nHere you can see an application build result page where, on the right-hand side under the `Included in deployment project` title, you can see a list of envs into which you can deploy the application. (Keep in mind that besides `build` and `deployment` pipelines, Bamboo also uses the notion of `releases` - an intermediate entity that has to be created out of a build result to make it possible to deploy that build into some env.) The `cloud-with-upwards-arrow` button in the `Actions` column starts the corresponding `deploy` pipeline, automatically passing it the link to a build result (in the form of a `release` entity, in Bamboo terminology) and the name of the env next to which the button has been clicked (the procedure of how a list of envs is created for a `deploy` pipeline is described above).\n\nThe concept of a `release` is specific to Bamboo Server, and it provides some amenities. For example, on the Release details page you can see a list of envs to which a release has been deployed. On the `Commits` tab you can backtrack a release to the application code in the VCS. 
And the `Issues` tab shows attached Jira tickets.\n\n![bamboo_release_details](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/bamboo_release_details.png)\nRelease details page\n{: .note.text-center}\n\nAn env details page also enumerates the release history for the env (though only in the scope of one particular application, as an env is specified for each deployment pipeline individually):\n\n![bamboo_env_details](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/bamboo_env_details.png)\nEnv details page\n{: .note.text-center}\n\nAnd upon clicking the `cloud-with-upwards-arrow` button, Bamboo shows a diff of Jira tickets and commits with respect to the previous `release` (only if both releases are made from artifacts from the same Git branch):\n\n![deploy_launch_page](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/deploy_launch_page.png)\nDeploy launch page\n{: .note.text-center}\n\nSo, in general, the current path from source control to an env for each app looks like:\n\n![svc_to_env_path](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/svc_to_env_path.png)\n\nThe Build plans are triggered automatically upon Git commits or Git tags. Most of the Deployment plans are started by the project members manually when needed. Each Deploy plan contains a step that checks whether the user who started the plan has permission to deploy into the env (for example, only members of the team that owns an env are allowed to deploy to it, and deployment to the production env is allowed only for a set of eligible project members).\n\n## The task\n\nThe task is to migrate the aforementioned design from Bamboo Server to `GitLab` while keeping a similar deployment scheme (leveraging GitLab's `Environments` feature).\n\nThe following should also be considered:\n\n - team members (software engineers, quality assurance specialists) are supposed to be able to manage environments on their own in a user-friendly self-service manner.\n - there should not be any discrepancy in IaC for different environments (per `12-factor apps` best practices), i.e., for any kind of environment, be it a development or production one, the same set of IaC (here - Terraform files) should be used.\n - the core ideas and workflows established in the previous situation (implemented with Atlassian Bamboo) should be kept to make the migration smoother for the members of the projects (also sometimes referred to as just users). \n\n## Implementation\n\n### Implementation's GitLab groups/projects structure\n\n```\n📦 \u003CGitLab root group>\n ┣ 📂 apps GitLab group\n ┃ ┣ 📃 app1 GitLab project\n ┃ ┣  ...\n ┃ ┗ 📃 appN GitLab project\n ┣ 📂 ci GitLab group\n ┃ ┣ 📃 library GitLab project\n ┃ ┗ 📃 oci-registry GitLab project\n ┗ 📂 infra GitLab group\n  ┣ 📃 environment-blueprints GitLab project\n  ┣ 📃 environment-set GitLab project\n  ┗ 📃 k8s-gitops GitLab project\n```\n\n*Description*:\n\nThe most important content is in the `ci/library` repo (the shared CI configs) and the `environment-set` repo. The other repos don't require much attention: the `k8s-gitops` purpose is not implemented and the repo is empty, the `apps` group just imitates source code for some apps, and the `ci/oci-registry` serves as an OCI registry for the solution.\n\nThe `apps` GitLab group merely contains the apps' source code per se. Each GitLab project in this group corresponds to one app. 
Each app repo is expected to contain the source code itself (in the `src` directory, for example), a `k8s` directory with k8s manifests, and an OCI image specification file (traditionally often called `Dockerfile`). \n\nThe `ci` GitLab group contains the `ci/library` project, which holds shared `.gitlab-ci.yaml` files used by other projects (in a manner similar to Jenkins' shared libraries), and the `ci/oci-registry` project, which serves as an OCI-image registry for various images used by the demo (it also contains a Git repository with gitlab-ci files to build some utility images with tools used in various pipelines). For simplicity, the latter stores all the images across all the projects of the demo, though that's clearly not the best choice for a real-life situation, where separate registries for the image sets of separate projects should be created.\n\nThe `infra` group holds the Git repositories related to creating the applications' infrastructure:\n\nThe `infra/k8s-gitops` repo is mostly irrelevant to the topic of this demo. In this demo it's presumed that Kubernetes is used as the computation workload platform, and when a k8s cluster is created for an environment, all the k8s manifests are supposed to be put into this repo (where each branch corresponds to a single environment) to be consumed by a GitOps tool installed into the cluster.\n\nThe `infra/environment-blueprints` repo holds parametrized IaC templates describing all the resources required for a full-fledged environment. In this example, Terraform is used as the IaC tool, though the principles are similar for its analogs (CloudFormation, for instance). The blueprints are parametrized in such a manner that their defaults hold sensible values (most likely different depending on the kind of environment they are used to bootstrap - for example, a production env versus everything else). Several versions of the blueprints may coexist (implemented using Git branches or Git tags), so each environment (see the next paragraph about `infra/environment-set`) can explicitly specify which version it wants to use (in the case of Terraform, by specifying a Git reference in the module's `source` field).\n\nHere I would like to once again highlight a digression from best practices. For simplicity, in the `infra/environment-blueprints` repo all the parts of an environment are combined into one single Terraform module (or a workspace, or a Stack in CloudFormation's terminology). In that way all the resources are always updated or changed within a single `terraform apply` command, which is cumbersome for large infrastructures containing a lot of resources. For larger infrastructures it would be more manageable to split into disparate Terraform modules (or CloudFormation Stacks, or Azure ARM Resource Groups) and thus make it possible for the infrastructure to be changed/updated in parts, according to which exact components of it have changed. This might raise another question - how to manage dependencies between such parts if they are present? For that, we would use some kind of external (with respect to the IaC tool itself) orchestration tool like AWS Step Functions... or even GitLab's DAG feature!\n\nFinally, the `infra/environment-set` project represents the actual expected state of resources for each environment (a branch corresponds to an environment). See the README.md file in the Git repo for details. 
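As a rough sketch (illustrative only - the URL and variable name are invented, not the demo's literal file), a branch in this project pins a blueprints version like so:\n\n```\n# main.tf in an environment branch - illustrative sketch\nmodule \"environment\" {\n  # Pin a specific blueprints version via a Git tag reference\n  source = \"git::https://gitlab.example.com/infra/environment-blueprints.git?ref=v1.0.0\"\n\n  # Override default variables set in the blueprints module\n  environment_name = \"preview-for-johns-team\"\n}\n```\n\n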
In short, each branch here is meant to contain a `main.tf` file referring to some version of the blueprints in the `infra/environment-blueprints` project, a set of Terraform files with overrides for any default variables set in the blueprints modules, and other utility files, such as a list of users allowed to deploy to the environment (such a list is to be checked by the deployment jobs in the apps projects).\n\n### **Important!**\n\nWhile looking at the implementation, keep in mind that this solution deliberately omits some crucial aspects of any project infrastructure, like security or monitoring, just for the sake of keeping the solution manageable and comprehensible. Implementing the security and monitoring aspects would make the solution cumbersome and much longer to prepare. That is also true for the `k8s-gitops` repository - it's implied that in a real-life solution it would actively participate in the deployment process and hold the Kubernetes clusters' state in a GitOps approach, but currently this repo is just a placeholder. In the practical guide later you will see a description of the process of controlling environments using different branches in the `infra/environment-set` project. Ideally, such a workflow should use Merge Requests, though for simplicity this implementation skips using MRs.\n\nAnother important thing that's possibly not clear in this solution is configuration management, i.e., how configuration settings unique to each environment are provided to the applications inside an environment. Well, given that our applications run within a Kubernetes cluster and that the cluster state is placed into a dedicated repo (`k8s-gitops` in our case), the configuration settings situation is simple - for each app, the Terraform files in `infra/environment-blueprints` should output all the relevant configuration values for the resources (like S3 bucket names, RDS endpoint URLs, etc.). Then, using Terraform itself or some other tool to create/update an environment, an additional step would collect all those outputs, transform them into k8s ConfigMap manifests, and put them into the GitOps repo. \n\nFor the secrets, we can go several ways. The most simplistic (though not flexible and not easy for secret rotation) way is to use some kind of encryption at rest, like Mozilla's SOPS, so that the secrets are encrypted when they are put into the GitOps repo and decrypted when deployed into K8s. Another (and arguably better) way is to not store secrets at rest at all, but use either a third-party tool like HashiCorp Vault (with dynamic secrets generation) or cloud-native features like [AWS IAM Roles for Service Accounts](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/).\n\n## Bootstrap the demo\n\nThe accompanying repository, https://gitlab.com/iLychevAD/ci-cd-for-a-multi-component-app, contains Terraform files that enable you to install a copy of the demo structure into your own GitLab account to see it in action:\n\n`*.tf` files in the root directory and in the `tf_modules` directory describe the structure and configuration of the GitLab projects and groups. In the `repo_content` directory there is the content for the GitLab repositories in the projects. The repositories are filled with those files by the Terraform scripts.\n\nThe demo was tested with GitLab Community Edition `15.0.0-pre revision 4bda1cc84df`. 
The Terraform scripts do not create any real resources but just imitate them using `null_resource` and `local-exec`.\n\nThe bootstrapping process is conducted inside a container image (see the steps below), so it's platform-agnostic: in terms of tools, all you need to spin up the demo is some containerization engine installed on your PC (e.g., Docker, Podman, etc.).\n\n**Steps**:\n\n1. In the GitLab web UI manually create a root group to bootstrap the demo into (see `root_gitlab_group.tf` for a link explaining why it's not possible to automate this). Note its ID - you need to provide it in the next step.\n\n2. Clone this repository. Then download the official HashiCorp Terraform image and enter its interactive shell. All the further commands are supposed to be performed inside that shell:\n    \n    ```\n    docker run --rm -it --name ci-cd-for-a-multi-component-app \\\n      -e TF_VAR_gitlab_token=\u003Cyour GitLab account access token> \\\n      -v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys \\\n      -e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n      -e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n      -e TF_VAR_root_gitlab_group_id=\u003CGitLab group ID> \\\n      -v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo \\\n      --entrypoint /bin/sh \\\n      public.ecr.aws/hashicorp/terraform:1.1.9\n    ```\n    \n    Explanation:\n    \n    `-e TF_VAR_gitlab_token=\u003Cyour GitLab account access token>` - Terraform's `gitlab` provider needs a GitLab access token with sufficient permissions to spin up the demo. Provide it as a Bash environment variable - `TF_VAR_gitlab_token` (see `provider.tf`). It is also used by the `upload_avatar` module.\n    \n    `-v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys` - on the left-hand side here, specify some directory on your local PC where you would like to store the SSH keys needed for deploying the demo. That way they are persisted even if you exit the container. See step `4` for more details.\n    \n    `-e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key` and\n    \n    `-e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key` - set the names for the aforementioned keys.\n    \n    `-v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo` - we mount the project content from your local PC into the running container. Note that, because of that, the Terraform local state file will be stored inside that directory on your PC.\n\n3. Install tools - bash and curl:\n    \n    ```\n    apk add bash curl\n \n    /bin/bash\n    ```\n\n4. Upon bootstrapping the demo, the repositories' content is pushed (i.e., restored) from the `repo_content` directory. (When the demo is destroyed, the content of the repositories is automatically pulled (i.e., saved) into the same directory - you probably don't need this, but I implemented it for my own convenience while creating the demo.) We need to create an SSH key pair, and it needs to be the same throughout both phases. 
In this step we generate it:\n    \n    ```ssh-keygen -t rsa -N '' -f /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key \u003C\u003C\u003C y```\n    \n    ```chmod 0400 /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key```\n    \n    A trick used in `tf_modules/gitlab_project_with_restore_backup/main.tf` requires that the host section of the SSH public key specifies the location of the private key (in a form like `filename@~/.ssh/\u003Cfilename>`). Otherwise the `tf_modules/gitlab_project_with_restore_backup` module won't work. Edit accordingly:\n    \n    ```sed -i -e 's|^\\(ssh-rsa .*\\) \\(.*\\)$|\\1 ci-cd-for-a-multi-component-app-deploy-key@/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key|' /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub```\n\nNow you can proceed with bootstrapping the demo using Terraform:\n\nInitialize Terraform with `terraform init` so it installs all the providers.\n\nDeploy the demo with `terraform apply`.\n\n**Notice**: During Terraform execution you may see an error:\n```\nError: POST https://gitlab.com/api/v4/projects/multi-component-app-root-group/ci/library/deploy_keys: 400 {message: {deploy_key.fingerprint_sha256: [has already been taken]}}\n\n```\nI believe this is some glitch in the GitLab API. To fix it, just run `terraform apply` once again until it shows no errors.\n\nAfter that you should see the following structure in GitLab in the root group:\n\n![gitlab_projects_tree](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/gitlab_projects_tree.png)\n\nAll the projects should be filled with files from the `repo_content` directory.\n\nDo not delete the directory with the cloned project or the files created inside it if you later want to clean things up. See the next section for instructions.\n\n## Cleaning up\n\nLaunch a container image the same way you did for bootstrapping the demo (see the previous section). It's assumed that you didn't delete any files in `\u003Cpath to a location where to store ssh key-pairs on your PC>` and `\u003Cpath to the directory where you cloned the project into>`: \n\n```\ndocker run --rm -it --name ci-cd-for-a-multi-component-app \\\n  -e TF_VAR_gitlab_token=\u003Cyour GitLab account access token> \\\n  -v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys \\\n  -e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n  -e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n  -e TF_VAR_root_gitlab_group_id=\u003CGitLab group ID> \\\n  -v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo \\\n  --entrypoint /bin/sh \\\n  public.ecr.aws/hashicorp/terraform:1.1.9\n```\n\nInstall curl:\n\n```apk add curl```\n\nRun `terraform destroy`.\n\n**Notice**: You may see some errors regarding deleting the `oci-registry` project with OCI images. 
In that case, just delete the images and remove the project manually, or wait until GitLab does it itself later.\n\nNow, if you want, you can remove the cloned project directory and the `\u003Cpath to a location where to store ssh key-pairs on your PC>` directory.\n\nIf you would like to deploy the demo once again without removing the directory with the cloned repo, don't forget to remove the files created during the previous demo deployment, namely the `terraform.tfstate` files in the root directory and the `.git` directories everywhere in the `repo_content` directory.\n\nIn the [second part](/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two/) of this tutorial, we'll look at a real-world example of how this can work.\n",[110,894,726],{"slug":2661,"featured":6,"template":678},"migration-from-atlassian-bamboo-server-to-gitlab-ci","content:en-us:blog:migration-from-atlassian-bamboo-server-to-gitlab-ci.yml","Migration From Atlassian Bamboo Server To Gitlab Ci","en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci.yml","en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci",{"_path":2667,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2668,"content":2673,"config":2679,"_id":2681,"_type":16,"title":2682,"_source":17,"_file":2683,"_stem":2684,"_extension":20},"/en-us/blog/a-story-of-runner-scaling",{"title":2669,"description":2204,"ogTitle":2669,"ogDescription":2204,"noIndex":6,"ogImage":2670,"ogUrl":2671,"ogSiteName":692,"ogType":693,"canonicalUrls":2671,"schema":2672},"An SA story about hyperscaling GitLab Runner workloads using Kubernetes","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669897/Blog/Hero%20Images/kaleidico-26MJGnCM0Wc-unsplash.jpg","https://about.gitlab.com/blog/a-story-of-runner-scaling","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An SA story about hyperscaling GitLab Runner workloads using Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Brian Wald\"}],\n        \"datePublished\": \"2022-06-29\",\n      }",{"title":2669,"description":2204,"authors":2674,"heroImage":2670,"date":2676,"body":2677,"category":14,"tags":2678},[1701,2675],"Brian Wald","2022-06-29","\n\nThe following *fictional story*\u003Csup>1\u003C/sup> reflects a repeating pattern that Solutions Architects at GitLab encounter frequently. In the analysis of this story, we intend to demonstrate three things: (a) why one should be thoughtful in leveraging Kubernetes for scaling, (b) how unintended consequences of an approach to automation can create a net productivity loss for an organization (a reversal of ROI), and (c) how solutions architecture perspectives can help find anti-patterns - retrospectively or when applied during a development process.\n\n### A DevOps transformation story snippet\n\nGild Investment Trust went through a DevOps transformational effort to build efficiency into their development process through automation with GitLab. Dakota, the application development director, knew that their current system handled about 80 pipelines with 600 total tasks and over 30,000 CI minutes, so scaled CI was clearly needed. Since development occurred primarily during European business hours, they were interested in reducing compute costs outside of peak work hours. 
Cloud compute was also a target, thanks to its pay-per-use model combined with elastic scaling.\n\nIngrid was the infrastructure engineer for developer productivity who was tasked with building out the shared GitLab Runner fleet to meet the needs of the development teams. At the beginning of the project she made a successful bid to leverage Kubernetes to scale CI and CD, to take advantage of elastic scaling and high availability, all with the efficiency of containers. Ingrid had recently achieved the Certified Kubernetes Administrator (CKA) certification and she was eager to put her knowledge to practical use. She did some additional reading around applications running on Kubernetes and noted the strong emphasis on minimizing the resource profile of microservices to achieve efficiency in the form of compute density. She defined runner containers with 2GB of memory and 750 millicores (about three-quarters of a CPU) and had good results from running some test CI pipelines. She also decided to leverage the Kubernetes Cluster Autoscaler, which would use the overall cluster utilization and scheduling to automatically add and remove Kubernetes worker nodes for smooth elastic scaling in response to demand.\n\nAbout 3 months into the proof-of-concept implementation, Sasha, a developer team lead, noted that many of their new job types were failing with strange error messages. The same jobs ran fine on quickly provisioned GitLab shell runners. Since the primary difference between the environments was the liberal allocation of machine resources in a shell runner, Sasha reasoned that the failures were likely due to the constrained CPU and memory resources of the Kubernetes pods. \n\nTo test this hypothesis, Ingrid decided to add a new pod definition. She found it was difficult to discern which of the job types were failing due to CPU constraints, which ones due to memory constraints, and which ones due to a combination of both. She knew it could take a lot of her time to discern the answer. She decided to simply define a pod that was more liberal on both CPU and memory and have it be selectable by runner tagging when more resources were needed for certain CI jobs. She created a GitLab Runner pod definition with 4GB of memory and 1750 millicores of CPU to cover the failing job types. Developers could then use these larger containers when the smaller ones failed by adding the ‘large-container’ tag to their GitLab job.\n\nSasha redid the CI testing and was delighted to find that the new resourcing made all the troubling jobs work fine. Sasha created a guide to help developers discern when mysterious error messages and failed CI jobs were probably the fault of resourcing, and then how to add a runner tag to the job to expand the resources.\n\nSome weeks later, two of the key job types that had been fixed by the new container resourcing started intermittently failing on NPM package creation, for just 3 pipelines across 2 different teams. Of course Sasha tried to understand what the differences were, and found that these particular pipelines were packaging notably large file sets: they were actually packaging testing data, and the NPM format was a convenient way to provide testing data during automated QA testing.\n\nSasha brought this information to Ingrid and together they did testing to figure out that a 6GB container with 2500 millicores would be sufficient for creating an NPM package out of the current test dataset size. 
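(For reference, resource profiles like these are typically expressed in the runner's `config.toml` for the Kubernetes executor. A rough sketch with illustrative values - the tag-to-runner mapping is assumed to be set up at registration:)\n\n```\n# config.toml - sketch of an \"xlarge\" resource profile for the Kubernetes executor\n[[runners]]\n  name = \"xlarge-runner\"   # jobs select it via the `xlarge` tag assigned at registration\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    cpu_request    = \"2500m\"\n    memory_request = \"6Gi\"\n    cpu_limit      = \"2500m\"\n    memory_limit   = \"6Gi\"\n```\n\n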
They also discussed whether the development team might want to use a dedicated test data management solution, but it turned out that the team's needs were very simple and that their familiarity with NPM packaging meant that bending NPM packaging to suit their purpose was actually more efficient than acquiring, deploying, learning, and maintaining a special system for this purpose. So a new pod resourcing profile was defined and could be accessed with the runner tag ‘xlarge’.\n\nSasha updated the guide for finding the optimal container size through failure testing of CI jobs - but they were not happy with how large the document was getting and how imprecise the process was for determining when a CI job failure was most likely due to container resource constraints. They were concerned that developers would not go through the process and would instead simply pick the largest container resourcing profile in order to avoid the effort of optimizing, and they shared this concern with Ingrid. In fact, Sasha noted, they were hard-pressed themselves to follow their own guidelines and not simply choose the largest container for all jobs.\n\nThe potential for this cycle to repeat was halted several months later when Dakota, the app dev director, generated a report that showed a 2% increase in developer time spent optimizing CI jobs using failure testing for container size optimization. Dakota considered this work to be a net new increase, because when the company was not using container-based CI, the developers did not have to manage this concern at all. Across the roughly 300 developers, this amounted to around $840,000 per year\u003Csup>2\u003C/sup>. It was also thought to add about 2 hours (and growing) to developer onboarding training. It was noted that the report did not attempt to account for the opportunity cost tax - what would these people be doing to solve customer problems with that time? It also did not account for the \"critical moments tax\" (when complexity has an outsized frustration effect and business impact in high-pressure, high-risk situations).\n\n### Solution architecture retrospective: What went wrong?\n\nThis story reflects a classic antipattern we see at GitLab, not only with regard to Kubernetes runner optimization, but also across other areas, such as overly minimalized build containers and the potential for resultant pipeline complexity, as was discussed in a previous blog called [When the pursuit of simplicity creates complexity in container-based CI pipelines](/blog/second-law-of-complexity-dynamics/). Frequently this result comes from inadvertent adherence to heuristics from a small part of the problem as though they were applicable to the entirety of the problem (a type of logical “fallacy of composition”).\n\nThankfully, the emergence of the anti-pattern follows a pattern itself :). Let’s apply a little retrospective solution architecture to the \"what happened\" in order to learn what might be done proactively next time to create better iterations on the next automation project.\n\nThere is a certain approach to landscaping shared greenspaces where, rather than shame people into compliance with signs about not cutting across the grass in key locations, the paths that humans naturally take are interpreted as the signal “there should be a path here.” Humans love beauty and detail in the environments they move through, but depending on the space, they can also value the efficiency of the shortest possible route slightly higher than aesthetics. 
A wise approach to landscaping holds these factors in a balance that reflects the space users' own balance of efficiency versus aesthetic appeal. The space stays beautiful without any shaming required.\n\nIn our story, Sasha and Ingrid had exactly this kind of cue about where the developers were likely to walk across the grass. If that cue is taken to be a signal that reflects efficiency, we can quickly see what can be done to avoid the antipattern when it starts to occur.\n\nThe signal was the observation that developers might simply choose the largest container all the time to avoid the fussy process of optimizing the compute resources being consumed. Some would consider that laziness and not a good signal to heed. However, most human laziness is deeply rooted in efficiency trade-offs. The developers intuitively understand that their time fussing with failure testing to optimize job containers, and their time diagnosing intermittent failures due to the varying content of those jobs, is not worth the amount of compute saved. That is especially true given the opportunity cost of not spending that time innovating on the core software solution for the revenue-generating application.\n\nIngrid and Sasha’s collaboration initially missed the scaled human toil that was introduced to keep container resources at the minimum tolerable levels. They failed to factor the escalating cost of scaled human toil into a comprehensive efficiency measurement. They were following a microservices resourcing pattern, which assumes the compute is purpose-designed around minimal and well-known workloads. When taken as a whole in a shared CI cluster, CI compute follows generalized compute patterns, where the needs for CPU, memory, disk IO, and network IO can vary wildly from one moment to the next.\n\nIn the broadest analysis, the infrastructure team over-indexed on the “team local” optimization of compute efficiency and unintentionally created a global de-optimization in the form of scaled human toil for another team.\n\n## How can this antipattern be avoided?\n\nOne way to combat over-indexing on a criterion is to have balancing objectives. This need is covered in \"Measure What Matters\" with the concept of counterbalancing objectives. There are some counterbalancing questions that can be asked of almost any automation effort. When solution architecture is functioning well, these counterbalancing questions are asked during the iterative process of building out a solution. Here are some applicable ones for this effort:\n\n**Appropriate rules: Does the primary compute optimization heuristic match the characteristics of the actual compute workload being optimized?**\n\nThe main benefits of container compute for CI are dependency isolation, dependency encapsulation, and a clean build environment for every job. None of these benefits has to do with the extreme resource optimizations available when engineering microservices-architected applications. As a whole, CI compute reflects generalized compute, not the ultra-specialized compute of a 12-factor-architected microservice.\n\n**Appropriate granularity: Does optimization need to be applied at every level?**\n\nThe fact that the cluster itself has elastic scaling at the Kubernetes node level is a higher-order optimization that will generate significant savings. 
Another possible optimization that would not require continuous fussing by developers is having a node group running on spot compute (as long as the spot compute runners self-identify their compute as spot, so pipeline engineers can select appropriate jobs for spot). These optimizations can create huge savings without creating scaled human toil.\n\n**People and processes counter-check: Does the approach to optimization create scaled human toil by its intensity and/or frequency and/or lack of predictability for any people anywhere in the organization?**\n\nAutomation is all about moving human toil into the world of machines. While optimizing machine resources must always be a primary consideration, it is a lower-priority objective than avoiding a net increase in human toil anywhere in your company. Machines can efficiently and elastically scale, while human workforces respond to scaling needs in months or even years.\n\n### Avoid scaled human toil\n\nNotice that neither the story nor the qualifying questions imply there is never a valid reason to have specialized runners that developers might need to select using tags. If a given attribute of runners could be selected once and with confidence, then the antipattern would not be in play. One example would be selecting spot-compute-backed runners for workloads that can tolerate termination. It is the potential for repeatedly needing attention to calibrate container sizing - made worse by the possibility of intermittent failure based on job content - that pushes this specific scenario into the potential realm of “scaled human toil.” The ability to leverage elastic cluster autoscaling is also a huge help in managing compute resources more efficiently. \n\nIf the risk of scaled human toil could be removed, then some of this approach might be preserved - for example, having a very large minimum pod resourcing and then a super-size for jobs that break the standard pod size just once. Caution is still warranted, because it is still possible that developers would have to fuss a lot to get a two-pod approach working in practice.\n\n### Beware of scaled human toil of an individual\n\nOne thing the story did not highlight is that even if we were able to move all the fussing of such a design to the Infrastructure Engineer persona (perhaps by building an AI tuning mechanism that guesses at pod resourcing for a given job), the cumulative taxes on their role are frequently still not worth the expense. This is, in part, because they have a leveraged role - they help with all the automation of the scaled developer workforce, and any time they spend on one activity can’t be spent on another. We humans are generally bad at accounting for opportunity costs - what else could that specific engineer be innovating on to make a stronger overall impact on the organization’s productivity or bottom line? Given the very tight IT labor market, a given function may not be able to add headcount, so opportunity costs take on an outsized importance.\n\n### Unlike people’s time, cloud compute does not carry opportunity cost\n\nA long time ago, people had to schedule time on shared computing resources. If the time was used for low-value compute activities, it could be taking away time from higher-value activities. In this model, compute time has an opportunity cost - the cost of whatever else that time could have been used for instead of a lower-value activity. Cloud compute has changed this because when compute is not being used, it is not being paid for. 
### Beware of scaled human toil of an individual\n\nOne thing the story did not highlight is that even if we were able to move all the fussing of such a design to the Infrastructure Engineer persona (perhaps by building an AI tuning mechanism that guesses at pod resourcing for a given job), the cumulative taxes on their role are frequently still not worth the expense. This is, in part, because they have a leveraged role - they help with all the automation of the scaled developer workforce, and any time they spend on one activity can’t be spent on another. We humans are generally bad at accounting for opportunity costs - what else could that specific engineer be innovating on to make a stronger overall impact on the organization’s productivity or bottom line? Given the very tight IT labor market, a given function may not be able to add headcount, so opportunity costs take on an outsized importance.\n\n### Unlike people’s time, cloud compute does not carry opportunity cost\n\nA long time ago, people had to schedule time on shared computing resources. If that time was used for low-value compute activities, it took time away from higher-value activities. In this model, compute time has an opportunity cost - the value of the higher-value work the machine could have been doing instead. Cloud compute has changed this because when compute is not being used, it is not being paid for. Additionally, elastic scaling eliminates the cost of over-provisioning hardware and the administrative overhead of procuring capacity - if you need a lot of capacity for a short period, it is immediately available. In contrast, people's time is neither elastically scalable nor pay-per-use. This means that the opportunity cost question “What could this time be used for if it didn’t have to be spent on low-value activities?” is still relevant for anything that creates activities for people.\n\n### The first corollary to the Second Law of Complexity Dynamics\n\nThe Second Law of Complexity Dynamics was introduced in an earlier blog post. The essence is that complexity is never destroyed - it is only reformed - and primarily it is moved across a boundary line that dictates whether the management of the complexity is in our domain or externalized. For instance, if you write a function for md5 hashing in your code, you are managing the complexity of that code. If you install a dependency package that contains a premade md5 hash function that you simply use, then the complexity is externalized and managed for you by someone else.\n\nIn this story, we are introducing a corollary to that “Law”: “**Exchanging Raw Machine Resources for Complexity Management is Generally a Reasonable Trade-off.**” In this case, our scaled human toil is created by the complexity of unending, daily management of optimizing compute efficiency. This does not mean that burning thousands of dollars of inefficient compute is OK because it saved someone 20 minutes of fussing. It is scoped in the following way:\n\n- scoped to “complexity management” (which is creating the “scaled human toil” in our story) - many minutes of toil that increases proportionally or compounds with more of the activity.\n- scoped to “raw machine resources” - meaning that there is no additional logistics or human toil needed to gain the resources. In the cloud, raw machine resources are generally available via configuration tweaks.\n- scoped to “generally reasonable” - this indicates a disposition of being very cautious about increasing human toil with an automation solution - but it still makes sense to use models or calculations to check if the rule actually holds in a given case.\n\nSo if we can externalize complexity management, that is great (The Second Law of Complexity Dynamics). If we can trade complexity management for raw computing resources, that is likely still better than managing it ourselves (The First Corollary).\n\n### Iterating SA: Experimental improvements for your next project\n\nThis post contains specifics that can be used to avoid antipatterns in building out a Kubernetes cluster for GitLab CI. However, in the qualifying questions we’ve attempted to kick it up one meta-level higher to help assess whether any automation effort may have an “overly local” optimization focus which can inadvertently create a net loss of efficiency across the more global “company context.” It is our opinion that automation efforts that create a net loss in human productivity should not be classified as automation at all. While it’s strong medicine to apply to one’s work, we feel that doing so causes appropriate innovation pressure to ensure that individual automation efforts truly deliver on their inherent promise of higher human productivity and efficiency. 
So simply ask: “Does this way of solving a problem cause recurring work for anyone?”\n\n### DevOps transformation and solution architecture perspectives\n\nA technology architecture focus rightfully homes in on the technology choices for a solution build. However, if it is the only lens, it can result in scenarios like our story. Solutions architecture steps back to a broader perspective to sanity-check that solution iterations account for a more complete picture of both the positive and negative impacts across all three of people, processes, and technology. As an organizational competency, DevOps emphasizes solution architecture perspectives when it is defined as a collaborative and cultural approach to people, processes, and technology.\n\nFootnotes:\n\n1. This fictional story was devised specifically for this article and does not knowingly reflect the details of any other published story or an actual situation. The names used in the story are from [GitLab’s list of personas](https://about.gitlab.com/handbook/product/personas/).\n2. Across a team of 300 full-time developers: 9.6 min/workday x 250 workdays/year = 2,400 min/year = 5 workdays; 5 workdays x $560/day ($140K total comp / 250 workdays) = $2,800/dev/year; x 300 developers = $840,000/year.\n\nCover image by [Kaleidico](https://unsplash.com/@kaleidico?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n",[832,937,704,750],{"slug":2680,"featured":6,"template":678},"a-story-of-runner-scaling","content:en-us:blog:a-story-of-runner-scaling.yml","A Story Of Runner Scaling","en-us/blog/a-story-of-runner-scaling.yml","en-us/blog/a-story-of-runner-scaling",{"_path":2686,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2687,"content":2693,"config":2699,"_id":2701,"_type":16,"title":2702,"_source":17,"_file":2703,"_stem":2704,"_extension":20},"/en-us/blog/observability-vs-monitoring-in-devops",{"title":2688,"description":2689,"ogTitle":2688,"ogDescription":2689,"noIndex":6,"ogImage":2690,"ogUrl":2691,"ogSiteName":692,"ogType":693,"canonicalUrls":2691,"schema":2692},"Observability vs. monitoring in DevOps","Want to gain true and actionable visibility across your software development lifecycle? Observability is the answer.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665484/Blog/Hero%20Images/monitoring-update-feature-image.jpg","https://about.gitlab.com/blog/observability-vs-monitoring-in-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Observability vs. monitoring in DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Vanbuskirk\"}],\n        \"datePublished\": \"2022-06-14\",\n      }",{"title":2688,"description":2689,"authors":2694,"heroImage":2690,"date":2696,"body":2697,"category":14,"tags":2698},[2695],"Mike Vanbuskirk","2022-06-14","\nIn almost any modern software infrastructure, there is inevitably some form of monitoring or logging. The launch of syslog for Unix systems in the 1980s established both the value of being able to audit and understand what is going on inside a system and the architectural importance of keeping that mechanism separate.\n\nHowever, despite the value and importance of this visibility into system behavior, too often monitoring and logging are treated as an afterthought. There are countless instances of systems emitting logs into a void, never being aggregated or analyzed for critical information. 
Or infrastructure where legacy monitoring systems were installed a decade ago and never updated to modern standards.\n\nRecently, shifts in the operational landscape have given rise to the concept of observability. Rather than expecting engineers to form their own assumptions about how their application is performing from static measurements, observability enables them to see a holistic picture of their application behavior and, critically, how a user perceives performance.\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## What is observability?\nTo understand the value in observability, it's helpful to first establish an understanding of what monitoring is, as well as what it does and does not provide in terms of information and context.\n\nAt its core, monitoring is presenting the results of measurements of different values and outputs of a given system or software stack. Common metrics for measurement are things like CPU usage, RAM usage, and response time or latency. Classic logging systems are similar: a log entry is a static piece of information about an event that occurred during system operation.\n\nMonitoring provides limited-context measurements that might indicate a larger issue with the system. Aggregation and correlation are possible using traditional monitoring tools, but typically require manual configuration and tuning to provide a holistic view. As the industry has advanced, the concept of what makes for effective monitoring has moved beyond static measurements of things like CPU usage. In its now-famous SRE book, Google emphasizes that you should focus on four key metrics, known as \"[Golden Signals](https://sre.google/sre-book/monitoring-distributed-systems/)\":\n\n- Latency: The time it takes to fulfill a request\n- Traffic: High-level measurement of overall demand\n- Errors: The rate at which requests fail\n- Saturation: Measurement of resource usage as a fraction of the whole; typically focuses on constrained resources\n\nWhile these metrics help home in on a better picture of overall system performance, they still require a non-trivial engineering investment to design, build, integrate, and configure a complete monitoring system. There is considerable effort involved in enumerating failure modes, and manually defining and associating the correct correlations in even simple cases can be time-consuming.\n\nIn contrast, observability offers a much more intuitive and complete picture as a first-class feature: You don’t need to manually correlate disparate monitoring tooling. An aggregated monitoring dashboard is only as good as the last engineer who built it; conversely, an observability platform adapts itself to present critical information in the right context, automatically. This can even extend further left into the software development lifecycle (SDLC), with observability tooling providing important performance feedback during CI/CD runs, giving developers operational feedback about their code.\n\nUltimately, observability provides more holistic debugging and understanding. Observability data can show the “unknown unknowns” to better understand production incidents. 
For more context on why that's important, the next section highlights an excellent example where monitoring might fall short and where observability fills in the crucial story.\n\n## Why focus on observability?\nFocusing on observability can help drive down mean time to resolution (MTTR), resulting in shorter outages, better application performance, and improved customer experience. While it may seem at first glance that monitoring can provide the same advantages, consider the anecdote that follows.\n\nAn engineering organization gets a ping from the accounting department; the invoice for cloud services is getting expensive, so much so that the CFO has noticed. DevOps engineers have pored over the monitoring system to no avail; every part of the system has consistently reported being in the green for things like memory, CPU, and disk I/O. As it turns out, the root cause was another \"unknown unknown\" event: DNS latency in the CI/CD pipelines was causing builds to fail at an elevated rate. Builds needing more retries consumed a great deal of cloud resources. However, this effect never persisted long enough to reflect in the monitoring system. By adding observability tooling and collecting all event types in the environment, ops was able to zero in on the source of the problem and remediate it. In a traditional monitoring system, the organization would have had to have known about the DNS latency problem a priori.\n\nObservability is also important for non-technical stakeholders and business units. As technology becomes more intertwined with the primary profit center, software infrastructure KPIs become business KPIs. Observability can provide better insight into KPI performance, as well as self-service options for different teams.\n\nModern software and applications depend heavily on providing good user experience (UX). As the previous story illustrates, monitoring static metrics won't always tell the complete story about UX or system performance. There might be serious issues lurking behind seemingly healthy metric dashboards.\n\n## Key observability metrics\nFor organizations that have decided to implement observability tooling, the next step is to identify the core goals of observability, and how that can best be implemented across their stack.\n\nAn excellent place to start is with the three fundamental pillars of observability:\n- Logs: Records of information and events\n- Metrics: Measurements of specific values and performance data\n- Tracing: Logging end-to-end request performance during runtime\n\nAlthough this can seem overwhelming, projects like [OpenTelemetry](https://opentelemetry.io/) are helping to drive broad standards acceptance for logging, metrics, and tracing, enabling a more consistent ecosystem and a shorter time-to-value for organizations that implement observability with tooling built on OpenTelemetry standards.\n\nAdditional observability data and pillars include:\n- Error tracking: More granular logs with aggregation\n- Continuous profiling: Evaluating granular code performance\n- Real User Monitoring (RUM): Understanding application performance from the perspective of an actual user\n\nLooking at these pillars, a central theme starts to emerge: in modern distributed systems, it's no longer enough to look at a small slice of time and space; a holistic, 10,000-foot view is needed. Understanding application performance starts with sampling it as an actual customer experiences it, and then further monitoring the complete performance and behavior of their interaction with your software.
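\n\nAs a rough sketch of how those pillars flow through OpenTelemetry-based tooling, here is a minimal, hypothetical OpenTelemetry Collector configuration (component names vary by Collector version; this is illustrative, not from the original article):\n\n```yaml\n# Receive logs, metrics, and traces over OTLP and print them for inspection.\nreceivers:\n  otlp:\n    protocols:\n      grpc: {}\n      http: {}\n\nprocessors:\n  batch: {}  # batch telemetry to reduce export overhead\n\nexporters:\n  logging: {}  # stdout exporter; swap in a real backend for production\n\nservice:\n  pipelines:\n    traces:\n      receivers: [otlp]\n      processors: [batch]\n      exporters: [logging]\n    metrics:\n      receivers: [otlp]\n      processors: [batch]\n      exporters: [logging]\n    logs:\n      receivers: [otlp]\n      processors: [batch]\n      exporters: [logging]\n```\n\n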
Beyond traditional application monitoring, observability can help improve the operational excellence posture for any engineering organization. Well-crafted alerts and incident management programs are usually born out of hard lessons from real outages. Implementing [chaos engineering](https://principlesofchaos.org/) can test observability platforms during real failures, albeit in a controlled environment with known outcomes. Introducing chaos engineering into the systems where \"unknown unknowns\" might hide - not just your production workloads but your CI/CD pipelines, supply chain, and DNS - can yield significant gains in operational footing.\n\n## Observability is a critical part of DevOps\nObservability is critical not only for DevOps, but also for the entire organization. Replacing the static data of legacy monitoring solutions, [observability](/direction/monitor/platform-insights/) provides a full-spectrum view of application infrastructure.\n\nDevOps teams should be working with stakeholders to share observability metrics in a way that benefits the entire organization, as well as take steps to improve the implementation. Learning, and then evangelizing, the benefits of app instrumentation to development teams can make observability even more effective. DevOps teams can also help identify the root cause of production incidents faster; well-instrumented application code makes it easy to distinguish application problems from infrastructure issues. Finally, shifting observability left along the CI/CD pipeline means potential service-level objective (SLO) deltas are caught before they reach production.\n\nDevOps teams looking to provide meaningful improvements to application performance and business outcomes can look to observability as a way to deliver both.\n\n**Watch now: Senior Developer Evangelist Michael Friedrich digs deeper into the shift from monitoring to observability:**\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/BkREMg8adaI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[894,1307,704],{"slug":2700,"featured":6,"template":678},"observability-vs-monitoring-in-devops","content:en-us:blog:observability-vs-monitoring-in-devops.yml","Observability Vs Monitoring In Devops","en-us/blog/observability-vs-monitoring-in-devops.yml","en-us/blog/observability-vs-monitoring-in-devops",{"_path":2706,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2707,"content":2712,"config":2718,"_id":2720,"_type":16,"title":2721,"_source":17,"_file":2722,"_stem":2723,"_extension":20},"/en-us/blog/second-law-of-complexity-dynamics",{"title":2708,"description":2709,"ogTitle":2708,"ogDescription":2709,"noIndex":6,"ogImage":2478,"ogUrl":2710,"ogSiteName":692,"ogType":693,"canonicalUrls":2710,"schema":2711},"How pursuit of simplicity complicates container-based CI","Simplicity always has a certain player in mind - learn how to avoid antipatterns by ensuring simplicity themes do not compromise your productivity by over-focusing on machine efficiencies.","https://about.gitlab.com/blog/second-law-of-complexity-dynamics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"When the pursuit of simplicity creates 
complexity in container-based CI pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-05-24\",\n      }",{"title":2713,"description":2709,"authors":2714,"heroImage":2478,"date":2715,"body":2716,"category":14,"tags":2717},"When the pursuit of simplicity creates complexity in container-based CI pipelines",[1701],"2022-05-24","\n\nIn a GitLab book club, I recently read \"[The Laws of Simplicity](http://lawsofsimplicity.com/),\" a great book on a topic that has deeply fascinated me for many years. The book contains an acronym that expresses simplicity generation approaches: SHE, which stands for \"shrink, hide, embody.\" These three approaches for simplicity generation all share a common attribute: They are all creating illusions - not eliminations.\n\nI've seen this illusion repeat across many, many realms of pursuit for many years. Even in human language, vocabulary development, jargon, and acronyms all simply encapsulate worlds of complexity that still exist, but can be more easily referenced in a compact form that performs SHE on the world of concepts.\n\nEvery illusion has a boundary or curtain: in front of the curtain, the complexity can be dealt with by following simple rules; behind the curtain, the complexity must be managed by a stage manager.\n\nFor instance, when the magic show creates the spectacle of sawing people in half, what appears to be a simple box is in fact an exceedingly elaborate contraption. Not only that, but the manufacturing process for an actual simple box and the sawing box are markedly different in terms of complexity. The manufacturing of that complexity and its result are essentially the tradeoff for what would be the real-world complexity of actually sawing people in half and having them heal and stand up unharmed immediately afterward.\n\nTo bring this into the technical skills realm, consider that when you leverage a third-party component or API to add functionality, you only need to know the parameters to obtain the desired result. The people maintaining that component or API must know the quantum mechanics detail level of how to perform that work in a reliable and complete way.\n\nDocker containers are a mechanism for embodying complexity, and are used in scaled applications and within container-based CI. When a [CI/CD](/topics/ci-cd/) automation engineer uses container-based CI, it is possible to make things more complex and more expensive when attempting to do exactly the opposite.\n\nAt its core, this post is concerned with how pursuing a simpler world through containers can turn into an antipattern - a reversal of desired outcomes - many times over, without us noticing that the reversal is affecting our productivity. The prison of a paradigm is secure indeed.\n\n### The Second Law of Complexity Dynamics\n\nOver the years I have come to believe that the pursuit of reducing complexity has similar characteristics to [The Second Law of Thermodynamics](https://www.grc.nasa.gov/www/k-12/airplane/thermo2.html). A conversion between mass and energy results in the same net amount of mass and energy; only the ratio and form have changed. In what I will coin \"The Second Law of Complexity Dynamics,\" complexity is similarly \"conserved\": it is just reformed.\n\nIf complexity is not eliminated by simplifying efforts, we reduce its impact in a given realm by changing the ratio of complexity and simplicity on each side of one or more curtains. 
But alas, complexity did not die; it just hid and is now someone else's management challenge. It is important not to think of this as cheating. There is no question that hiding complexity carries the potential for massive efficiency gains when the world behind the hiding mechanisms becomes the realm of specialty skills and specialists. When it truly externalizes the complexity management for one party, the world becomes simpler for that party.\n\nHowever, the devil is in the details. If the hypothesis of \"no net elimination of complexity\" is correct, it is then important where the complexity migrates to. If it migrates to another part of the same process that must also be managed by the same people, then it may not result in a net gain of efficiency. If it migrates out of a previously embodied realm, then, in the pursuit of simplicity, we can actually reduce our overall efficiency when the process is considered as a whole.\n\n### Container-based CI pipelines as a useful case in point\n\nI see the potential for efficiency reversals to crop up in my daily work time and again, and an interesting place where I've seen it lately is in the tradeoff of linking together hyper-specialized modules of code in containers for CI versus leveraging more generalized modules.\n\nIn creating container-based pipelines, I experience a potential efficiency reversal that I have to consciously manage.\n\nContainers make a simplicity tradeoff by design. They create a full runtime environment for a single, narrow purpose, but in doing so they strip back the container internals so far that general compute tasks become difficult inside them. If you step behind their \"complexity embodying\" curtain into the container, their simplistic environment can require more complex code to operate within.\n\nIn GitLab CI pipelines that utilize containers, all the scripts of jobs run inside the containers that are specified as their runtime environment. When one selects a specialized container - such as the alpine git container or the skopeo image management container - the code is subject to the limitations of the shell that container employs (if it has one at all).\n\nContainers were devised to be hyper-specialized, purpose-specific runtimes that assure they can always run and run quickly for scaled applications. However, for many containers this means no shell or a very stripped-back shell like busybox sh. It frequently also means not including the package manager for the underlying Linux distribution.\n\nTime and again, I've found myself degrading the implementation of my shell code in key ways that make it more complex, so that it can run under these stripped-back shells. In these cases, I do not benefit from the complexity hiding of newer versions of advanced shells like Bash v5. One of the areas is advanced Bash shell expansions, which embody a huge world of complex parsing and avoid a bunch of extraneous utilities. Another is advanced `if` and `case` statement comparison logic that processes regular expressions without external utilities and performs many other abstracted comparisons. There are many other areas of the language where this comes into play, but these two stand out.\n\n![](https://about.gitlab.com/images/blogimages/second-law-of-complexity-dynamics-container-pipeline-tradeoffs.png)\n\nSo with a simpler shell like busybox sh, the complexity that advanced shell features normally hide becomes *unhidden* and joins my side of the curtain. Now I have to manage it in my code.
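\n\nA hypothetical illustration of the difference (the jobs and the string transformation are invented for this example): the same rewrite is a built-in expansion under Bash 5 but needs an external utility under busybox sh.\n\n```yaml\n# Under Bash 5, parameter expansion handles the rewrite natively.\njob-with-bash:\n  image: bash:5.1\n  script:\n    - VERSION=\"release-1.2.3\"\n    - echo \"${VERSION/release-/v}\"  # prints v1.2.3, no external tools\n\n# Under busybox sh, the same rewrite falls back to an external utility.\njob-with-busybox:\n  image: busybox\n  script:\n    - VERSION=\"release-1.2.3\"\n    - echo \"$VERSION\" | sed 's/release-/v/'  # prints v1.2.3 via sed\n```\n\n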
But then, guess what? No package manager means the inability to install other Linux utilities and language extensions that I could also employ to push that same complexity back out of my space. And, of course, it means installing Bash v5 would be difficult as well.\n\nSo the simplicity proposition of a tightly optimized purpose-specific container can reverse the purported efficiency gains in the very important realm of the code I have to write. It also means I frequently have to break up my code into multiple jobs to utilize the specializations of these containers in a sequence or to transport the results of a specialized container into a fuller coding environment. This increases the complexity of the pipeline as I now have to pass artifacts and variable data from one job to another with a host of additional YAML directives, and sometimes deploy infrastructure (e.g., [Runner caching](https://docs.gitlab.com/ee/ci/caching/#:~:text=For%20runners%20to%20work%20with,GitLab.com%20behave%20this%20way)).\n\nIn the case of CI using containers, when the simplicity tradeoffs move complexity from things I do not maintain, such as base containers, operating system packages, and full shell environments, into things I do maintain, such as CI YAML and shell script code, then I am also inheriting long-term complexity maintenance. In the cloud, we know this as undifferentiated heavy lifting.\n\nInterestingly, the proliferation of specialized containers can also require more machine resources and can lengthen processing time as containers are retrieved from registries and loaded, and artifacts and source code are copied in and out of each job-based container.\n\n### Simplicity target: Efficiency\n\nIt's easy to lose sight of the amount of human effort and ingenuity being applied to knowing and managing the coding structure, rather than being applied to solving the real automation problems of the CI pipeline. The net complexity of the pipeline can also mean it is hard to maintain an understanding of it even if you are working in it every day - and for newcomers onboarding, it can be many weeks before they fully understand how the system works.\n\nOf course, I can create my own containers for CI pipelines, but now I've added the complexity of container development and continuous updates of the same in order for my pipeline code to be operational and stay healthy. I am still behind the curtain for that container. For teams whose software is not itself containerized, the prospect of learning to build containers just for CI can create a lot of understandable friction to adopting a container-based CI development process. This friction may be unnecessary if we make a key heuristic adaptation.\n\n### Walking the tightwire above the curtain\n\nSo how do I manage the tensions of these multiple worlds of complexity when it comes to container-based pipelines, to try to avoid efficiency reversals in the net complexity of the pipeline?\n\nIt is simple. I will describe the method and then the key misapplied heuristic and how to adjust it.\n\n1. I hold that the primary benefits of container-based CI are a) dependency isolation by job (so that you don’t have a massive and brittle CI build machine specification to handle all possible build requirements), and b) clean CI build agent state by obtaining a clean container copy for each job. These benefits do not imply having to abide by microservices container resource planning, and doing so is what creates an antipattern in my productivity.\n\n2. 
I frequently use a Bash 5 container (version pegged if need be) where all the complexity that advanced shell capabilities embody for me stays behind the curtain.\n\n3. Instead of running a hyper-minimalized container for a given utility, I do a runtime install of that utility (gasp!) in a container that has my rich shell. I utilize version pegging during the install if I feel version safety is paramount on the utility. Alternatively, if a very desirable runtime of some type is difficult to set up and does not have a package, I look for a container that has a package manager that matches a packaged version of the runtime and also allows me to install my advanced scripting language if needed.\n\n4. If, and only if, the net time of the needed runtime installs exceeds the net pipeline time to load a string of specialized containers (with artifact handling) plus my time to develop and manage a pipeline dependency in the form of a custom container, do I consider creating a pipeline-specific container.\n\n5. Through this process a balancing principle also emerges. Since I have been doing runtime installs as a development practice, I have actually already MVPed what a pipeline-specific container would need to have installed. I can literally copy the installation lines into a Dockerfile if I wish. I can also notice if I have commonality across multiple pipelines where it makes sense to create a multi-pipeline utility container.\n\nIn a recent project, following these principles caused me to avoid the skopeo container and instead install skopeo in the Bash 5 container using a package manager.
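\n\nA minimal sketch of what that can look like in CI YAML (the job, image tag, and variables are hypothetical; the official `bash` image is Alpine-based, so `apk` is available):\n\n```yaml\n# Rich-shell container with a package manager; skopeo installed at runtime.\ncopy-image:\n  image: bash:5.1  # version-pegged Bash 5 container\n  before_script:\n    - apk add --no-cache skopeo  # pin a version here if version safety matters\n  script:\n    - skopeo copy docker://$SOURCE_IMAGE docker://$TARGET_IMAGE\n```\n\nIf these install lines ever justify a custom container, they can be copied into a Dockerfile almost verbatim.\n\n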
If your team is big into Python or PowerShell as your CI language, it would make sense to start with recent releases of those containers. The point is not advanced Bash - but an advanced version of your general CI scripting language that prevents you from creating workarounds in your code for problems that are well-solved in publicly available runtimes.\n\nKeep in mind that this adjustment is very, very focused on containers **in CI pipelines**, which, by nature, reflect general compute processing requirements where many vastly different operations are required in a pipeline. I am not advocating this approach for true microservices applications where, by design, a given service has a very defined purpose and characteristics and, at scale, massively benefits from the machine efficiency of hyper-minimalized, purpose-specific granularity.\n\n### Misapplied heuristics\n\nFrequently, when a pattern has an inflection point at which it becomes an antipattern, it is due to misapplying the heuristics of the wrong realm. In this case, I believe, normal containerization patterns for microservices apps are well-founded, but they apply narrowly to \"engineered hyper-specialized compute\" of a granule we call \"a microservice\" (note the word \"micro\" applies to the scope of compute activities). Importantly, they apply because the process itself is designed as hyper-specialized around a very specific task. The container contents (included dependencies), immutability principle (no runtime change), and the runtime compute resources can be managed exceedingly minimally because of the small and highly specific scope of computing activities that occur within the process.\n\nThis is essentially the embodiment of the 12 Factor App principle called “[VIII. Concurrency](https://12factor.net/concurrency),” which asserts that scaling should be horizontal scaling of the same minimalized process, not vertical scaling of compute resources inside a given process. If the system experiences 10x work for a particular activity, we create 10 processes; we do not request 10x memory and 10x CPU within one running process. Microservices architecture tightly controls the amount of work in each request so that it is hyper-predictable in its compute resource requirements and, therefore, scalable by adding identical processes.\n\nCI compute, by nature, is the opposite of hyper-specialized. Across build, test, package, deploy, and so on, there are huge variations in the required machine resources of memory, CPU, network I/O, and high-speed disk access and, importantly, in the included dependencies. The generalized compute nature also occurs due to varying inputs, so the same defined process might need far more resources depending on the nature of the raw input data: for example, varying input volume (many versus few data items) or varying input density (processing binary files versus text files).\n\nIt is the process that is being containerized that holds the attribute of generalized compute (bursty on at least some compute resources) or hyper-specialized (narrow definition of work to be done and therefore well-known compute resources per unit of completed work). Containerizing a process that exhibits generalized compute requirements is useful, but planning the resources of that container as if containerizing it had transformed the compute requirements into hyper-minimalized ones is the inflection point at which it becomes an antipattern, actually eroding the sought-after benefits we set out to create.\n\nIn the model I employ for leveraging containers in CI, the loosening of the hyper-specialization, immutability (no runtime installs), and very narrow compute resources principles of microservices simply reflects the real world, in that CI compute as a whole exhibits the nature of generalized, not hyper-specialized, compute characteristics.\n\n> Another realm where this seems true is desired state configuration management technologies - also known as “Configuration as Code”. It is super simple if there are pre-existing components or recipes for all that you need to do, but as soon as you have to build some for yourself, you enter a world of creating imperative code against a declarative API boundary (there's the \"embodiment\" curtain - the declarative API boundary). Generally, if you have not had to implement imperative code to process declaratively, this new world takes significant experience to become proficient in.\n\n### Iterating SA: Experimental improvements for your next project\n\n1. In general, favor simplicity boundaries that reduce your work, especially in the realm of undifferentiated heavy lifting. In the realm of container-based CI, this includes having a rich coding language and a package manager to acquire additional complexity-embodying utilities quickly and easily.\n\n2. In general, be suspicious of an underlying antipattern if you have to spend an inordinate amount of time coding and maintaining workarounds in the service of simplicity. In the realm of container-based CI, this would be containers that are ultra-minimalized around microservices performance characteristics when they don’t hyper-scale as a standing service within CI.\n\n3. 
In general, stand back and examine the net complexity of the code and frameworks that will have to be maintained by yourself or your team, and check if you’ve made tradeoffs that have a net negative tax on your efficiency. When complexity that can be managed by machines enters your workspace at high frequency, then you have a massive antipattern of human efficiency.\n\n4. Frequently, when the heuristics being applied create negative human efficiency, they also create negative machine efficiency. Watch for this effect in your projects. The diagram in the post shows that over-minimalized containers can easily lead to using a lot more of them - all of which has machine overhead as well.\n\nIf the above resonates, CI pipeline engineers might want to consider loosening the \"microservices\" heuristics of hyper-specialization, ultra-minimalization, and immutability (no dynamic installs) for CI pipeline containers in order to ensure that the true net complexity level of the code they have to maintain is in balance and their productivity is preserved.\n\n### Appendix: Working examples of this idea\n\n- [AWS CLI Tools in Containers](https://gitlab.com/guided-explorations/aws/aws-cli-tools) has both Bash and PowerShell Core (on Linux OS) available so that one container set can suit the automation shell preference of both Linux and Windows heritage CI automation engineers.\n\n- The CI file [installs yq dynamically](https://gitlab.com/guided-explorations/gl-k8s-agent/gitops/envs/world-greetings-env-1/-/blob/main/.gitlab-ci.yml#L47-48) in the Bash container, but then [only installs the heavier jq and skopeo](https://gitlab.com/guided-explorations/gl-k8s-agent/gitops/envs/world-greetings-env-1/-/blob/main/.gitlab-ci.yml#L63) if needed by the work implied, which demonstrates a way to be more efficient even when runtime installs are desired.\n\n- [Bash and PowerShell Script Code Libraries in Pure GitLab CI YAML](https://gitlab.com/guided-explorations/ci-cd-plugin-extensions/script-code-libraries-in-pure-gitlab-ci-yaml) shows how to have libraries of CI script code available to every container in a pipeline without encapsulating the libraries in a container themselves and with minimalized CI YAML complexity compared to YAML anchors, references, or extends. 
While the method is a little challenging to set up, from then on it pays back by decoupling scripting libraries from any other pipeline artifact.\n\n- [CI/CD Extension Freemarker File Templating](https://gitlab.com/guided-explorations/ci-cd-plugin-extensions/ci-cd-plugin-extension-freemarker-file-templating) shows that the install is very quick, only affects one job, and still version-pegs the installed utility.\n",[832,937,726,1084,750],{"slug":2719,"featured":6,"template":678},"second-law-of-complexity-dynamics","content:en-us:blog:second-law-of-complexity-dynamics.yml","Second Law Of Complexity Dynamics","en-us/blog/second-law-of-complexity-dynamics.yml","en-us/blog/second-law-of-complexity-dynamics",{"_path":2725,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2726,"content":2732,"config":2737,"_id":2739,"_type":16,"title":2740,"_source":17,"_file":2741,"_stem":2742,"_extension":20},"/en-us/blog/how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes",{"title":2727,"description":2728,"ogTitle":2727,"ogDescription":2728,"noIndex":6,"ogImage":2729,"ogUrl":2730,"ogSiteName":692,"ogType":693,"canonicalUrls":2730,"schema":2731},"How we reduced 502 errors by caring about PID 1 in Kubernetes","For every deploy, scale down event, or pod termination, users of GitLab's Pages service were experiencing 502 errors. This explains how we found the root cause and rolled out a fix for it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682305/Blog/Hero%20Images/KubeCon2022.jpg","https://about.gitlab.com/blog/how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we reduced 502 errors by caring about PID 1 in Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steve Azzopardi\"}],\n        \"datePublished\": \"2022-05-17\",\n      }",{"title":2727,"description":2728,"authors":2733,"heroImage":2729,"date":2734,"body":2735,"category":14,"tags":2736},[2596],"2022-05-17","\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\nOur [SRE on call](https://about.gitlab.com/handbook/engineering/infrastructure/incident-management/#engineer-on-call-eoc-responsibilities)\nwas getting paged daily that one of our\n[SLIs](https://www.youtube.com/watch?v=tEylFyxbDLE) was\nburning through our\n[SLOs](https://www.youtube.com/watch?v=tEylFyxbDLE) for the [GitLab\nPages](https://docs.gitlab.com/ee/user/project/pages/) service. It was\nintermittent and short-lived, but enough to cause user-facing impact that we\nweren't comfortable with. This turned into alert fatigue because there wasn't\nenough time for the SRE on call to investigate the issue, and it wasn't\nactionable since it recovered on its own.\n\nWe decided to open up an [investigation issue](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497)\nfor these alerts. 
We had to find out what the issue was since we were showing `502` errors to our users, and we needed a\n[DRI](https://about.gitlab.com/handbook/people-group/directly-responsible-individuals/)\nwho wasn't on call to investigate.\n\n## What is even going on?\n\nAs an [SRE](https://handbook.gitlab.com/job-families/engineering/infrastructure/site-reliability-engineer/)\nat GitLab, you get to touch a lot of services that you didn't build yourself and\ninteract with system dependencies that you might not have touched before.\nThere's always detective work to do!\n\nWhen we looked at the GitLab Pages logs we found that it was always returning\n[`ErrDomainDoesNotExist`](https://gitlab.com/gitlab-org/gitlab-pages/-/blob/e1f1effa23c520d3b8b717d831ccab7ba3dd494f/internal/routing/middleware.go#L22-26)\nerrors, which result in a `502` error to our users. GitLab Pages [sends a request](https://gitlab.com/gitlab-org/gitlab-pages/-/blob/e1f1effa23c520d3b8b717d831ccab7ba3dd494f/internal/source/gitlab/client/client.go#L101-127)\nto [GitLab Workhorse](https://docs.gitlab.com/ee/development/workhorse/),\nspecifically the `/api/v4/internal/pages` route.\n\nGitLab Workhorse is a Go service in front of our Ruby on Rails monolith, and\nit's deployed as a [sidecar](https://www.magalix.com/blog/the-sidecar-pattern)\ninside of the `webservice` pod, which runs Ruby on Rails using the `Puma` web\nserver.\n\nWe used the internal IP to correlate the GitLab Pages requests with GitLab Workhorse\ncontainers. We looked at multiple requests and found that all the 502 requests\nhad the following error attached to them: [`502 Bad Gateway with dial tcp 127.0.0.1:8080: connect: connection refused`](https://gitlab.com/gitlab-org/gitlab/-/blob/f64be48cc737f5d12c1c30f724af540a836dcc94/workhorse/internal/badgateway/roundtripper.go#L43).\nThis means that GitLab Workhorse couldn't connect to the Puma web server. So we\nneeded to go another layer deeper.\n\nThe Puma web server is what runs the Ruby on Rails monolith, which has an\ninternal API endpoint, but Puma was never getting these requests since it wasn't\nrunning. What this told us is that Kubernetes kept our pod in the\n[service](https://kubernetes.io/docs/concepts/services-networking/service/)\neven when Puma wasn't responding, despite having [readiness probes](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/4bb638bccc6a676f9fdd5bbf800f7d2b977efd55/charts/gitlab/charts/webservice/templates/deployment.yaml#L279-287)\nconfigured.\n\nBelow is the request flow between GitLab Pages, GitLab Workhorse, and Puma/Webservice to make it clearer:\n\n![overview of the request flow](https://about.gitlab.com/images/blogimages/how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes/overview.png){: .shadow.center}\n\n## Attempt 1: Red herring\n\nWe shifted our focus to GitLab Workhorse and Puma to try to understand how\nGitLab Workhorse was returning 502 errors in the first place. We found some\n`502 Bad Gateway with dial tcp 127.0.0.1:8080: connect: connection refused`\nerrors during container startup time. How could this be? 
With the readiness\nprobe, the pod shouldn't be added to the\n[Endpoint](https://kubernetes.io/docs/concepts/services-networking/service/#over-capacity-endpoints)\nuntil [all readiness probes pass](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497#note_899321775).\nWe later found out that it's because of a [polling\nmechanism](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497#note_899629314)\nthat we have for [Geo](https://docs.gitlab.com/ee/administration/geo/), which\nruns in the background, using a Goroutine in GitLab Workhorse, and pings Puma for Geo information.\nWe don't have Geo enabled on GitLab.com, so we [simply disabled it](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/1670)\nto reduce the noise.\n\nWe removed some 502 errors, but not the ones we wanted - just a red herring.\n\n## Attempt 2: Close but not quite\n\nAt this time, we were still burning through our SLO from time to time, so this\nwas still an urgent thing that we needed to fix. Now that we had cleaner logs for\n`502` errors, it started to become clearer that this was happening on pod\ntermination:\n\n```\n2022-04-05 06:03:49.000 UTC: Readiness probe failed\n2022-04-05 06:03:51.000 UTC: Puma (127.0.0.1:8080) started shutdown.\n2022-04-05 06:04:04.526 UTC: Puma shutdown finished.\n2022-04-05 06:04:04.000 UTC - 2022-04-05 06:04:46.000 UTC: workhorse started serving 502 constantly.  42 seconds of serving 502 requests for any request that comes in apart from /api/v4/jobs/request\n```\n\nIn the timeline shown above, we see that we kept serving requests well after\nour `Puma`/`webservice` container exited and the first readiness probe failed.\nIf we look at the readiness probes we had on that pod, we see the following:\n\n```\n$ kubectl -n gitlab get po gitlab-webservice-api-785cb54bbd-xpln2 -o jsonpath='{range .spec.containers[*]} {@.name}{\":\\n\\tliveness:\"} {@.livenessProbe} {\"\\n\\treadiness:\"} {@.readinessProbe} {\"\\n\"} {end}'\n webservice:\n        liveness: {\"failureThreshold\":3,\"httpGet\":{\"path\":\"/-/liveness\",\"port\":8080,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":20,\"periodSeconds\":60,\"successThreshold\":1,\"timeoutSeconds\":30}\n        readiness: {\"failureThreshold\":3,\"httpGet\":{\"path\":\"/-/readiness\",\"port\":8080,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":60,\"periodSeconds\":10,\"successThreshold\":1,\"timeoutSeconds\":2}\n  gitlab-workhorse:\n        liveness: {\"exec\":{\"command\":[\"/scripts/healthcheck\"]},\"failureThreshold\":3,\"initialDelaySeconds\":20,\"periodSeconds\":60,\"successThreshold\":1,\"timeoutSeconds\":30}\n        readiness: {\"exec\":{\"command\":[\"/scripts/healthcheck\"]},\"failureThreshold\":3,\"periodSeconds\":10,\"successThreshold\":1,\"timeoutSeconds\":2}\n```\n\nThis meant that for the `webservice` pod to be marked unhealthy and removed\nfrom the endpoints, Kubernetes had to get 3 consecutive failures with an\ninterval of 10 seconds, so that's 30 seconds in total. That seems a bit slow.\n\nOur next logical step was to reduce the `periodSeconds` for the readiness probe\nfor the `webservice` pod so we wouldn't wait 30 seconds before removing the pod\nfrom the service when it becomes unhealthy.
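\n\nFor illustration only (these are not the chart's actual values), a shorter `periodSeconds` shrinks the detection window, since removal takes roughly `failureThreshold x periodSeconds`:\n\n```yaml\n# Sketch: 3 failures x 5s = ~15s to mark the pod unhealthy, versus 30s before.\nreadinessProbe:\n  httpGet:\n    path: /-/readiness\n    port: 8080\n  periodSeconds: 5\n  failureThreshold: 3\n  timeoutSeconds: 2\n```\n\n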
Before doing so, we had to understand if sending more requests to the `/-/readiness` endpoint would have any knock-on effects, such as using more memory. We had to [understand what the `/-/readiness` endpoint was doing](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497#note_903812722)\nand if it was safe to increase the frequency at which we send requests. We\ndecided it was safe, and after enabling it on\n[staging](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/1686#note_903877755)\nand\n[canary](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/1688#note_904501848)\nwe didn't see any increase in CPU/memory usage, as expected, and saw a\nreduction in 502 errors, which made us more confident that\nthis was the issue. We rolled this out to\n[production](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/1689)\nwith high hopes.\n\nAs usual, Production is a different story from Staging or Canary, and it showed\nthat we hadn't removed all the 502 errors, just [enough to stop triggering the SLO every day](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497#note_905993144),\nbut at least we removed the alert fatigue for the SRE on call. We were close, but not quite.\n\n## Attempt 3: All gone!\n\nAt this point, we were a bit lost and weren't sure what to look at next. We had\na bit of tunnel vision and kept blaming the fact that we weren't removing the\nPod from the `Endpoint` quickly enough. We even looked at [Google Cloud Platform\nNEGs](https://cloud.google.com/kubernetes-engine/docs/how-to/standalone-neg) to\nsee if we could have faster readiness probes and remove the pod more quickly. However,\nthis wasn't ideal [because we wouldn't have solved this for our self-hosting customers](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497#note_908359286),\nwho seem to be facing the same [problem](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/2943).\n\nWhile researching, we also came across a known problem with [running `Puma` in\nKubernetes](https://github.com/puma/puma/blob/bf2548ce300c2b4f671582bc756dcec5861e815f/docs/kubernetes.md)\nand thought that might be the solution. However, we had already implemented a\n[blackout window](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/c1b63f3a4867886bc1212d86985fc70e66b717c5/charts/gitlab/charts/webservice/templates/deployment.yaml#L223-224)\njust for this specific reason, so it couldn't be that either... in other words, it was another dead end.\n\nWe took a step back, looked at the [timelines one more time](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497#note_910106152),\nand then it hit us: the Puma/webservice container was terminating within a\nfew seconds, but the GitLab Workhorse one was always taking 30 seconds. Was it because\nof the [long polling from GitLab Runner](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/21698)? 30 seconds\nis a \"special\" number for Kubernetes [pod termination](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination).\nWhen Kubernetes deletes a pod, it first sends the `TERM` signal to the\ncontainer and waits 30 seconds; if the container hasn't exited by then, it\nsends a `KILL` signal. 
This indicated that maybe GitLab Workhorse was never\nshutting down and Kubernetes had to kill it.\n\nOnce more we looked at the GitLab Workhorse source code and [searched for the `SIGTERM` usage](https://gitlab.com/gitlab-org/gitlab/-/blob/d66f10e169a08cedcbfe70e3ea46cbfbb20d972d/workhorse/main.go#L238-258),\nand it did seem to support [graceful termination](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/62701), and\nit even had explicit logic for long-polling requests - so was this just another\ndead end? Luckily, when the `TERM` signal is sent, Workhorse [logs a message that\nit's shutting down](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/62701). We looked\nat our logs for this specific message and didn't see anything. Is this it? We\naren't gracefully shutting down? But how? Why does it result in 502 errors?\nWhy does GitLab Pages keep using the same pod that is terminating?\n\nWe knew that the `TERM` signal is sent to PID 1 inside of the container,\nand that process should handle the `TERM` signal for graceful shutdown. We\nlooked at the GitLab Workhorse process tree and this is what we found:\n\n```sh\ngit@gitlab-webservice-default-5d85b6854c-sbx2z:/$ ps faux\nUSER         PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND\nroot        1015  0.0  0.0 805036  4588 ?        Rsl  13:12   0:00 runc init\ngit         1005  0.3  0.0   5992  3784 pts/0    Ss   13:12   0:00 bash\ngit         1014  0.0  0.0   8592  3364 pts/0    R+   13:12   0:00  \\_ ps faux\ngit            1  0.0  0.0   2420   532 ?        Ss   12:52   0:00 /bin/sh -c /scripts/start-workhorse\ngit           16  0.0  0.0   5728  3408 ?        S    12:52   0:00 /bin/bash /scripts/start-workhorse\ngit           19  0.0  0.3 1328480 33080 ?       Sl   12:52   0:00  \\_ gitlab-workhorse -logFile stdout -logFormat json -listenAddr 0.0.0.0:8181 -documentRoot /srv/gitlab/public -secretPath /etc/gitlab/gitlab-workhorse/secret -config /srv/gitlab/config/workhorse-config.toml\n```\n\nBingo! `gitlab-workhorse` is PID 19 in this case, and a child process of a\n[script](https://gitlab.com/gitlab-org/build/CNG/-/blob/92d3e22e9ff6c5cbb685aeea99813751d5e19a9d/gitlab-workhorse/Dockerfile#L51)\nthat we invoke. Taking a close look at the\n[script](https://gitlab.com/gitlab-org/build/CNG/-/blob/92d3e22e9ff6c5cbb685aeea99813751d5e19a9d/gitlab-workhorse/scripts/start-workhors),\nwe checked whether it listens for `TERM` - and it doesn't! So far everything indicated\nthat GitLab Workhorse was never getting the `TERM` signal, which meant it ended up receiving\n`KILL` after 30 seconds. We updated our `scripts/start-workhorse` to use\n[`exec(1)`](https://linux.die.net/man/1/exec) so that `gitlab-workhorse`\nreplaced the PID of our bash script. That should have worked, right? When we tested\nthis locally, we saw the following process tree:\n\n```\ngit@gitlab-webservice-default-84c68fc9c9-xcsnm:/$ ps faux\nUSER         PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND\ngit          167  0.0  0.0   5992  3856 pts/0    Ss   14:27   0:00 bash\ngit          181  0.0  0.0   8592  3220 pts/0    R+   14:27   0:00  \\_ ps faux\ngit            1  0.0  0.0   2420   520 ?        Ss   14:24   0:00 /bin/sh -c /scripts/start-workhorse\ngit           17  0.0  0.3 1328228 32800 ?       
Sl   14:24   0:00 gitlab-workhorse -logFile stdout -logFormat json -listenAddr 0.0.0.0:8181 -documentRoot /srv/gitlab/public -secretPath /etc/gitlab/gitlab-workhorse/secret -config /srv/gitlab/config/workhorse-config.toml\n```\n\nThis changed things a bit: `gitlab-workhorse` was no longer a child\nprocess of `/scripts/start-workhorse`; however, `/bin/sh` was still PID 1. What was even\ninvoking `/bin/sh`, which we didn't see anywhere in our\n[Dockerfile](https://gitlab.com/gitlab-org/build/CNG/-/blob/92d3e22e9ff6c5cbb685aeea99813751d5e19a9d/gitlab-workhorse/Dockerfile)?\nAfter some thumb-twiddling, we had an idea: the container runtime was invoking\n`/bin/sh`. We went back to basics and looked at the\n[`CMD`](https://docs.docker.com/engine/reference/builder/#cmd) documentation to\nsee if we were missing something, and we were. We read the following:\n\n> If you use the shell form of the CMD, then the \u003Ccommand> will execute in `/bin/sh -c`:\n>\n> ```\n> FROM ubuntu\n> CMD echo \"This is a test.\" | wc -\n> ```\n>\n> If you want to run your \u003Ccommand> without a shell then you must express the command as a JSON array and give the full path to the executable. This array form is the preferred format of CMD. Any additional parameters must be individually expressed as strings in the array:\n>\n> ```\n> FROM ubuntu\n> CMD [\"/usr/bin/wc\",\"--help\"]\n> ```\n\nThis was exactly [what we were doing](https://gitlab.com/gitlab-org/build/CNG/-/blob/92d3e22e9ff6c5cbb685aeea99813751d5e19a9d/gitlab-workhorse/Dockerfile#L51)!\nWe weren't using `CMD` in `exec form`, but in `shell form`. Changing this and testing\nit locally confirmed that `gitlab-workhorse` was now PID 1 and also received the\ntermination signal:\n\n```\ngit@gitlab-webservice-default-84c68fc9c9-lzwmp:/$ ps faux\nUSER         PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND\ngit           65  1.0  0.0   5992  3704 pts/0    Ss   15:25   0:00 bash\ngit           73  0.0  0.0   8592  3256 pts/0    R+   15:25   0:00  \\_ ps faux\ngit            1  0.2  0.3 1328228 32288 ?       Ssl  15:24   0:00 gitlab-workhorse -logFile stdout -logFormat json -listenAddr 0.0.0.0:8181 -documentRoot /srv/gitlab/public -secretPath /etc/gitlab/gitlab-workhorse/secret -config /srv/gitlab/config/workhorse-config.toml\n```\n\n```\n{\"level\":\"info\",\"msg\":\"shutdown initiated\",\"shutdown_timeout_s\":61,\"signal\":\"terminated\",\"time\":\"2022-04-13T15:27:57Z\"}\n{\"level\":\"info\",\"msg\":\"keywatcher: shutting down\",\"time\":\"2022-04-13T15:27:57Z\"}\n{\"error\":null,\"level\":\"fatal\",\"msg\":\"shutting down\",\"time\":\"2022-04-13T15:27:57Z\"}\n```\n\nOK, so then we just needed to update `exec` and `CMD []` and we would have been\ndone, right? Almost. GitLab Workhorse proxies all of the API, Web, and Git requests, so we couldn't just make a big change and expect that everything was going to be OK. We had to progressively roll this out to make\nsure we didn't break any existing working behavior since this affects all the\nrequests that come to GitLab.com. To do this, we hid it behind a [feature\nflag](https://gitlab.com/gitlab-org/build/CNG/-/merge_requests/972) so GitLab\nWorkhorse is only PID 1 when the `GITLAB_WORKHORSE_EXEC` environment variable\nis set. This allowed us to deploy the change and only enable it on a small part\nof our fleet to watch for any problems. We were a bit more careful here and\nrolled it out zone by zone in Production since we run in 3 zones. 
When we\nrolled it out in the [first\nzone](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497#note_919259030),\nwe saw all 502 errors disappear! After fully rolling this out, we saw that [the\nproblem was fixed and it had no negative side\neffects](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497#note_920585707). Hurray!\n\nWe still had one question unanswered: why was GitLab Pages still trying to use\nthe same connection even after the Pod was removed from the Service because it was\nscheduled for deletion? When we looked at the Go internals, we saw that [Go reuses\nTCP connections](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15497#note_920642770)\nif we close the body of the response. So even though the pod is no longer part of the Service,\nthe client can still keep the TCP connection open and send requests - this explains why\nwe kept seeing 502s while a pod was being terminated, and always from the same GitLab\nPages pod.\n\nNow it's all gone!\n\n## More things that we can explore\n\n1. We've made graceful termination for GitLab Workhorse the [default](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/1732).\n1. Audit all of our Dockerfiles that use `CMD command` and fix them. We've found 10, and [fixed all of them](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3249).\n1. [Better readiness probe defaults for the `webservice` pod](https://gitlab.com/gitlab-org/charts/gitlab/-/merge_requests/2518).\n1. Add [linting](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3253) for Dockerfiles.\n1. See if any of our child processes need [zombie process reaping](https://blog.phusion.nl/2015/01/20/docker-and-the-pid-1-zombie-reaping-problem/).\n\n## Takeaways\n\n1. We should always care about what is PID 1 in a container.\n1. Always try to use `CMD [\"executable\",\"param1\",\"param2\"]` in your Dockerfile.\n1. Pods are removed from the Service/Endpoint asynchronously.\n1. If you are on GKE, [NEGs](https://cloud.google.com/kubernetes-engine/docs/how-to/standalone-neg) might be better for readiness probes.\n1. By default, there is a 30-second grace period between the `TERM` signal and the `KILL` signal when Pods terminate. You can update the time between the signals with `terminationGracePeriodSeconds` (see the sketch after this list).\n1. 
\n\nThank you to [@igorwwwwwwwwwwwwwwwwwwww](https://gitlab.com/igorwwwwwwwwwwwwwwwwwwww), [@gsgl](https://gitlab.com/gsgl), [@jarv](https://gitlab.com/jarv), and [@cmcfarland](https://gitlab.com/cmcfarland) for helping me debug this problem!\n\n",[1002],{"slug":2738,"featured":6,"template":678},"how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes","content:en-us:blog:how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes.yml","How We Removed All 502 Errors By Caring About Pid 1 In Kubernetes","en-us/blog/how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes.yml","en-us/blog/how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes",{"_path":2744,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2745,"content":2751,"config":2758,"_id":2760,"_type":16,"title":2761,"_source":17,"_file":2762,"_stem":2763,"_extension":20},"/en-us/blog/how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too",{"title":2746,"description":2747,"ogTitle":2746,"ogDescription":2747,"noIndex":6,"ogImage":2748,"ogUrl":2749,"ogSiteName":692,"ogType":693,"canonicalUrls":2749,"schema":2750},"How We Built a Stack Overflow Community Questions Analyzer","We wanted to better understand what Stack Overflow GitLab Community members wanted to know, so we automated a way to keep track of it all. Here's a step-by-step look at how we did it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667552/Blog/Hero%20Images/gitlabonstackoverflow.png","https://about.gitlab.com/blog/how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we built a Stack Overflow Community questions analyzer (and you can too)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2022-04-28\",\n      }",{"title":2752,"description":2747,"authors":2753,"heroImage":2748,"date":2755,"body":2756,"category":14,"tags":2757},"How we built a Stack Overflow Community questions analyzer (and you can too)",[2754],"William Arias","2022-04-28","\nBeing part of the GitLab Collective is an opportunity to learn firsthand about the challenges the community using the DevOps Platform is facing. As a [Collective Member](https://stackoverflow.com/collectives/gitlab) logging in to Stack Overflow two to three times a week, reading the questions and discussions posted about GitLab, and manually sorting them by 'Recent Activity', 'Trending', and date, I asked myself: How can we leverage this wealth of data to discover feedback and find the most frequent topics where the community has questions?\n\nThis would be an opportunity to get a quick overview of the topics where the community regularly needs help; it would also make it easier for us to create relevant content for them. Manually sorting and extracting the text of each question wouldn’t be sustainable, so automating the process was the most efficient way to proceed.\n\n## Experimenting with data-oriented content creation\n\nFinding out what the community is working on, and what they need help with while using GitLab, can help us create better educational content that expands their understanding of GitLab. 
To achieve this goal, the solution I created after a few iterations is depicted below:\n\n![Solution architecture](https://about.gitlab.com/images/blogimages/fontes.png)\n\nThe bill of materials consists mainly of:\n\n- GitLab DevOps Platform\n- Stack Overflow API\n- Kubernetes cluster\n- Open source Python libraries:\n  - scikit-learn (TF-IDF)\n  - Streamlit (front end)\n  - spaCy\n\nI leveraged the GitLab DevOps Platform to organize the projects using groups:\n\n![Projects organized into GitLab groups](https://about.gitlab.com/images/blogimages/organize.png)\n\nThe Loader project pulls questions about GitLab from the Stack Overflow API, pre-processes the text, and makes it usable for a second project: a Visualizer that creates customized dashboards.\n\nThe automated process executed using the DevOps Platform is outlined below:\n\n![Automated process overview](https://about.gitlab.com/images/blogimages/automatedprocess.png)\n\n- Pull data from the [StackOverflow API](https://api.stackexchange.com/docs)\n- Preprocess the response, extracting relevant fields from the returned JSON\n- Build a corpus and calculate TF-IDF\n- Scan for security vulnerabilities\n- Create a review application that displays the resulting dashboards using [Streamlit](https://streamlit.io/)\n- Deploy the built application to a Kubernetes cluster\n\nThe Loader and Visualizer projects have their own codebases and pipelines, which is helpful if different teams need to work on them separately. However, one project can require the other, which raises the need for cross-project automation.\n\n![Cross-project pipeline](https://about.gitlab.com/images/blogimages/pipeline.png)\n\nThis scenario is where a [multi-project pipeline](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) is useful to automate the whole process. The multi-project pipeline enables use cases such as:\n\n- As an NLP developer, I want to work on the NLP pipeline in the Loader project and automatically trigger the creation of a new visualization\n- As a Streamlit developer, I want to work independently on the buttons and data visualization without touching the NLP pipeline backend\n\nThe process outlined above runs automatically by defining the steps in a [multi-project pipeline](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) that shares artifacts:\n\n![Multi-project pipeline run](https://about.gitlab.com/images/blogimages/demo1-gif.gif)\n\n## Finding the most frequently occurring words\n\nThe feature engineering step helps me analyze the text in the whole dataset of GitLab questions. Using a simple yet powerful technique – TF-IDF – we aim to find the most relevant terms used by the community. By applying this technique in the pipeline execution, I represent words as numerical values and later rank them in order of importance. This approach serves as a baseline for further improvements. More detail about this algorithm can be found [here](https://en.wikipedia.org/wiki/Tf%E2%80%93idf).\n\n## Did we achieve any success?\n\nOne run of the multi-project pipeline in our solution results in dashboards such as this one:\n\n![Resulting dashboard of relevant terms](https://about.gitlab.com/images/blogimages/demo2-gif.gif)\n\nAs an end user of these dashboards, I can immediately conclude that the main sources of questions are GitLab CI, pipelines, and the usage of Docker images. Not bad for a first run!
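\n\nFor reference, here is a minimal sketch of the TF-IDF ranking step described above (illustrative only: the question titles are made up, and the project's actual preprocessing is not shown):\n\n```python\n# Rank terms in a small corpus of Stack Overflow question titles by their\n# average TF-IDF weight using scikit-learn.\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ncorpus = [\n    \"How to cache Docker images in a GitLab CI pipeline\",\n    \"GitLab CI pipeline fails when pulling a private Docker image\",\n    \"How to trigger a downstream pipeline in GitLab\",\n]\n\nvectorizer = TfidfVectorizer(stop_words=\"english\")\ntfidf = vectorizer.fit_transform(corpus)\n\n# Average each term's weight across the documents and print the top 5 terms.\nweights = tfidf.mean(axis=0).A1\nterms = vectorizer.get_feature_names_out()\nfor term, weight in sorted(zip(terms, weights), key=lambda x: -x[1])[:5]:\n    print(f\"{term}: {weight:.3f}\")\n```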
Having the data processed enables us to ask more questions and use the data to answer them, such as: What are the questions from the users with the highest [StackOverflow reputation](https://stackoverflow.com/help/whats-reputation)?\n\n![Questions from high-reputation users](https://about.gitlab.com/images/blogimages/questions.png)\n\nCould these questions be inspiration for tutorials for the most advanced users, or for the implementation of a new feature?\n\nBecause everyone can contribute, let's take a look at the users who have just started gaining their StackOverflow reputation:\n\n![Questions from new contributors](https://about.gitlab.com/images/blogimages/contributors.png)\n\nThe question about access and reading/writing permissions in Portuguese is interesting. It makes me wonder about content localization and GitLab meetups in Portuguese-speaking countries. Not surprisingly, there were also questions about GitLab CI, which the text processing and ranking found to be the most relevant topic in the corpus.\n\nDid we achieve any success? Yes: A baseline technique such as TF-IDF, sped up by DevOps practices, allowed us to find relevant terms and understand where the majority of the community needs help in their DevOps journey. I have automated many steps, which will allow me to focus on data exploration and the possible implementation of more complex NLP techniques rather than infrastructure allocation or the manual input of commands and tests.\n\n![Automated run demo](https://about.gitlab.com/images/blogimages/demo-reduced.gif)\n\nAs a Technical Marketing Manager, I want to create content that is relevant to enable or inspire the community to succeed.\n\nA personal takeaway: Educating users about the latest GitLab DevOps platform capabilities and the problems they solve is important, and so is keeping an eye on the content that might not be related to a new feature but is needed right now.\n\nAre we done? No. Quoting an altered version of da Vinci's quote about [art](https://www.artshub.com.au/news/features/art-is-never-finished-only-abandoned-262096-2370305/#:~:text=Lottie%20Consalvo%20in%20her%20studio,writers%2C%20and%20creatives%20would%20recognise), but applied to software: \"Software is never finished, only abandoned.\"\n\nThere is room for improvement and for adding capabilities to this project. We continue iterating and listening to the community, and we encourage you to clone these projects, try them yourself, and adjust them to the topics that make sense to you. 
Create a merge request to improve the codebase and suggest new dashboard ideas!\n\nExplore the [group of projects](https://gitlab.com/tech-marketing/ad-fontes) and take a look at the [dashboard](https://bit.ly/3jeTFQp).\n",[894,268,1445],{"slug":2759,"featured":6,"template":678},"how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too","content:en-us:blog:how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too.yml","How We Built A Stack Overflow Community Questions Analyzer And You Can Too","en-us/blog/how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too.yml","en-us/blog/how-we-built-a-stack-overflow-community-questions-analyzer-and-you-can-too",{"_path":2765,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2766,"content":2772,"config":2777,"_id":2779,"_type":16,"title":2780,"_source":17,"_file":2781,"_stem":2782,"_extension":20},"/en-us/blog/comparing-static-site-generators",{"title":2767,"description":2768,"ogTitle":2767,"ogDescription":2768,"noIndex":6,"ogImage":2769,"ogUrl":2770,"ogSiteName":692,"ogType":693,"canonicalUrls":2770,"schema":2771},"How to choose the right static site generator","Here's an in-depth look at 6 static site generators that deploy to GitLab Pages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682290/Blog/Hero%20Images/kelly-sikkema-gchfxsdcmje-unsplash-resized.jpg","https://about.gitlab.com/blog/comparing-static-site-generators","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to choose the right static site generator\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2022-04-18\",\n      }",{"title":2767,"description":2768,"authors":2773,"heroImage":2769,"date":2774,"body":2775,"category":14,"tags":2776},[2092],"2022-04-18","\n\nMost websites today fall into two categories: dynamic sites and static sites.\n\n## Dynamic sites\n\nDynamic sites are interactive, and the user experience can be tailored to the visitor. These are the ones that might remember who you are across visits or deliver content that's most applicable to the region you're visiting from. They rely on a content management system (CMS) or database for rendering and can continue to grow in complexity as the organization's needs grow.\n\n## Static sites\n\nStatic sites, however, generally display the same content to all users. They are rendered ahead of time and served as plain HTML, CSS, and JavaScript files. While CMS backends have made dynamic sites easier to launch and maintain, static sites continue to grow in popularity.\n\n## What is a static site generator?\n\nA static site generator (SSG) is a software tool that generates a static website by processing plain text files that contain the website's content and markup. The resulting website consists of a set of HTML, CSS, and JavaScript files, and other assets, that can be served by a web server to visitors.\n\nTo use a static site generator, you typically write your website's content in a markup language like Markdown, and use a templating language to define the website's layout and structure. You then run the static site generator to produce the final set of HTML, CSS, and JavaScript files, which can be deployed to a web server or content delivery network (CDN) for visitors to access.\n\n## Benefits of using a static site generator\n\nStatic sites' advantages include speed, security, and SEO. 
They're also easy to maintain and highly scalable. Because static sites are served as already-compiled pages from a CDN, they load a lot faster.\n\nAs static sites consist solely of static files, no database is required, resulting in many additional benefits:\n\n* There is no need to spend valuable time querying a database or running any resource-intensive server-side scripts.\n\n* There are no extraneous libraries, databases, or other features that a dynamic site is built with. All you have are static files. Therefore, it’s very simple to work with and migrate as needed.\n\n* There’s no database for would-be hackers to attack.\n\n* Since there is no need for scripts to run, scaling is very easy without overwhelming your server.\n\nAll static site generators can be exciting and fun, but some require time and effort on configuration, detailed templating, or management tweaks. My team and I joke that I am one of the top blog-less SSG experts, so in this blog post, I’ll walk you through a toolkit for evaluating your project and then share some SSGs that deploy to GitLab Pages.\n\nHere are the SSGs I'll review in this post:\n\n- [**Hugo**](https://gohugo.io/) is written in Go with support for multi-language sites and complex content strategy.\n- [**Zola**](https://www.getzola.org) is written in Rust with a single binary, no dependencies, and flexible features like Sass compilation.\n- [**Jekyll**](https://jekyllrb.com/) is written in Ruby, built for blogging, and has a large collection of plugins and themes.\n- [**Hexo**](https://hexo.io/) is Node.js based with support for multiple templating engines, integrations with NPM packages, and one-command deployment.\n- [**GatsbyJS**](https://www.gatsbyjs.com/) is React-based, works with any CMS, API, or database, and can be used for building headless experiences.\n- [**Astro**](https://gitlab.com/pages/astro) is JavaScript-based, supports multiple frameworks, and is known for on-demand rendering via partial hydration.\n\n## An Evaluation Toolkit\n\nWith so many static site generators available, selecting one for your project can be overwhelming. When evaluating which SSG is right for you, here are a few things to consider about your project, your use case, and the type of work you're looking to put into the site.\n\n**Identify the use case**\n\nIt’s important to understand your site's needs, purpose, and content. Are you building a personal blog, a landing page for a product, or documentation for a tech project? Consider whether you need a streamlined editor experience, and what content and interactions you want to offer your users. The better you can identify the experience you'd like your visitors to have, the easier it will be to pick the feature set that can best support it.\n\n**Specify languages and frameworks**\n\nThere are so many static site generators out there that you can find one in nearly every language and framework. Consider whether you want to learn a new language or use something you're familiar with. Depending on how much time you’d like to invest in setting up, you should also review the installation details and see if you’re familiar with the templating language, dependencies, and theming layer.\n\n**Review the ecosystem**\n\nMany static site generators will have starter repositories or sample sites where you can play around with functionality and components before diving into your project. 
When reviewing the ecosystem, think about the limitations of the templating engine, whether you’ll need dynamic or JavaScript components, and how you’ll include them. Some generators will have out-of-the-box or community-contributed plugins and extensions.\n\n**Check out the community**\n\nThere are often Discord or forum communities where you can get support, share ideas, review case studies, and see what other people are building. Some of the most popular generators might even have conferences or workshops for getting started on more complex topics.\n\n**Identify the specialty**\n\nFrom microblogging to academic portfolios and small business sites, static site generators cover various use cases for different people. Each SSG has its own specialty, whether it’s the framework it uses, a unique feature in its templating language, or the size of the installation binary.\n\n## The Single Binary Approach\n\nSome static site generators install as a single binary and don't require complex dependency management. The single-binary approach gets things set up quickly and easily. It is also easier for non-technical or academic users, since you can pass the executable around for installation and use a markup language like Markdown to write content.\n\n[**Hugo**](https://gohugo.io/) is written in Go, a statically compiled language, with support for multiple platforms. The Hugo binary can be downloaded and run from anywhere and is simple to install, with no runtime dependencies or installation process. Upgrades involve downloading a new binary, and you're all set. Hugo supports unlimited content types, taxonomies, dynamic content driven from an API, multi-lingual sites, and Markdown. It also ships with premade templates, making it easy to get started with menus and site maps.\n\nOne of the advantages of using Hugo is that it doesn't depend on client-side JS. It also has a thriving community with many prebuilt themes and starter repositories. There is an [existing sample site in Hugo that deploys to GitLab Pages](https://gitlab.com/pages/hugo) to help you get started. If you're migrating to Hugo from another SSG, you can use the [hugo import](https://gohugo.io/commands/hugo_import/) command or one of the [community-developed migration tools](https://gohugo.io/tools/migrations/).\n\n[**Zola**](https://www.getzola.org) is a strongly opinionated SSG written in Rust that uses the Tera template engine. It's available as [a prebuilt binary](https://github.com/getzola/zola/releases), is super fast to set up, and comes with some essential features like syntax highlighting, taxonomies, a table of contents, Sass compilation, and hot reloading. The Tera templating engine supports built-in shortcodes to inject more complex HTML or to handle repetitive, data-driven sections. Configuration for Zola sites is managed in TOML.\n\nOne of the limitations of Zola might be the lack of a built-in scripting language. Unlike other SSGs, there isn't an ecosystem of plugins you can add to your site. Many in the community appreciate this lack of modularity because Zola’s specialty is content-driven sites. [One of the most popular posts on their Discourse forum is a proposal for plugins](https://zola.discourse.group/t/proposal-plugin/975), which discusses ways to include dynamic loading for plugins without affecting the single-binary distribution.\n\nZola is commonly used for content-driven websites. 
One of its notable features is how content is structured using a tree with sections and pages. There is no example site on GitLab Pages, but the Zola documentation includes a [guide on how to deploy to GitLab Pages](https://www.getzola.org/documentation/deployment/gitlab-pages/).\n\n## The Standard Approach\n\nWhen it comes to generators and frameworks, you might hear, \"Boring is better.\" Sometimes the preferred SSG is feature-complete, well-documented, and has a community of examples and plugins to support it - even if it's not actively growing anymore.\n\n[**Jekyll**](https://jekyllrb.com/) is a static site generator written in Ruby and released in 2008. It paved the way for static sites by replacing the need for a database and inspiring developers to start creating blogs and documentation pages quickly and easily. It uses the Liquid templating language, has a vast plugin ecosystem, and is known to be beginner-friendly since it’s just HTML (or Markdown, if you prefer). While it doesn’t provide many features out of the box, Jekyll supports Ruby plugins for any functionality you might need. There are over [200 plugins](https://github.com/topics/jekyll-plugin), themes, and resources available to use.\n\nOne of the challenges when working with Jekyll is that you need a whole Ruby development environment to build your site. This can be tricky for developers unfamiliar with Ruby, or when making updates. Another thing to consider is the build pipeline - it supports Sass compilation out of the box, but the community recommends using webpack to build assets instead. If you're migrating to Jekyll from another framework or CMS, there are [importers](https://import.jekyllrb.com/docs/home/) that can help automate part of the process. There is a [Jekyll site example that deploys to GitLab Pages](https://gitlab.com/pages/jekyll).\n\n[**Hexo**](https://hexo.io/) is a Node.js static site generator that offers itself as a blogging framework. It has built-in support for Markdown, front matter, and tag plugins, and it specializes in creating markup-driven blogs. Hexo provides the Nunjucks template engine by default, but you can easily install additional plugins to support alternative templating engines. Like Jekyll, Hexo also [supports migrations](https://hexo.io/docs/migration#content-inner) from several popular frameworks, including WordPress.\n\nA notable feature of Hexo is tag plugins. Tag plugins are snippets of code you can add to your Markdown files without having to write complex or messy HTML to render specific content. Hexo supports several tag plugins, including block quotes, Twitter and YouTube embeds, and code blocks. There’s an [example site for Hexo that deploys to GitLab Pages](https://gitlab.com/pages/hexo) and also a [guide in the Hexo documentation](https://hexo.io/docs/gitlab-pages).\n\n## SSGs and beyond\n\nFor those who love flexibility and modularity, there are some SSGs that allow you to do everything from full content moderation support and dynamic API-driven content to state management and partial rendering.\n\n[**GatsbyJS**](https://www.gatsbyjs.com/) is an open source, React-based static site generator that is optimized for speed and has an extensive plugin library. GatsbyJS supports routing, image handling, accessibility, and hot reloading out of the box. To improve performance, it loads only the critical elements of the page and prefetches assets for other pages so they load as quickly as possible. It also uses webpack to bundle all of your assets. 
\n\nGatsbyJS believes in a “content mesh” where third-party platforms provide specialized functionality to the base architecture. It allows you to seamlessly pull data from multiple sources, making it popular for headless approaches with a CMS backend like Drupal, WordPress, or Contentful. You use GraphQL to query the APIs and manage data throughout your site. The GatsbyJS community has contributed over 2,000 plugins, including starter repositories and templates that you can use to get started. There’s an [example GatsbyJS site that deploys to GitLab Pages](https://gitlab.com/pages/gatsby).\n\n[**Astro**](https://gitlab.com/pages/astro) is a Bring Your Own Framework (BYOF) static site generator with no package dependencies. You can build your site with any JavaScript framework or web components, and Astro will render it into static HTML and CSS. This flexibility has made it popular, since it’s future-proof for migrations. Astro ships with automatic sitemaps, RSS feeds, and pagination. It uses Snowpack to compile JavaScript, which supports hot module replacement, ES6 modules, and dynamic imports without extra configuration. The project is still a [beta release, with 1.0 coming in June 2022](https://twitter.com/astrodotbuild/status/1512505549354639363?s=20&t=zXDUGuYmbiOp08FTETXw5A).\n\nA notable feature of Astro is partial hydration. If you decide that parts of your site need interactivity, you can “hydrate” just those components when they become visible on the page. This way, your pages load super fast by default and have [“islands of interactivity”](https://docs.astro.build/en/core-concepts/partial-hydration/#island-architecture). There are several themes, plugins, components, and showcase projects available. Astro has [an online playground](https://astro.new/) where you can try out features and integrations in your browser. There’s also [an Astro example site on GitLab Pages](https://gitlab.com/pages/astro).\n\n## Creating your own SSG\n\nSometimes, the best part of building a static site is creating a custom generator based on a specific programming language, architecture, and feature set. You might find that the process of creating a static site generator is more exciting than actually writing blogs for your site. You'll need to consider several preferences, from document structure and templating language to theming support, custom plugins, and the build pipeline. You’ll have the opportunity to customize the features to your liking. And there are many [static site generators that deploy to GitLab Pages](https://gitlab.com/pages) to provide inspiration! 
\n\n\nCover image by [Kelly Sikkema](https://unsplash.com/photos/gcHFXsdcmJE) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1979,268,703],{"slug":2778,"featured":6,"template":678},"comparing-static-site-generators","content:en-us:blog:comparing-static-site-generators.yml","Comparing Static Site Generators","en-us/blog/comparing-static-site-generators.yml","en-us/blog/comparing-static-site-generators",{"_path":2784,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2785,"content":2791,"config":2796,"_id":2798,"_type":16,"title":2799,"_source":17,"_file":2800,"_stem":2801,"_extension":20},"/en-us/blog/deploy-shopify-themes-with-gitlab",{"title":2786,"description":2787,"ogTitle":2786,"ogDescription":2787,"noIndex":6,"ogImage":2788,"ogUrl":2789,"ogSiteName":692,"ogType":693,"canonicalUrls":2789,"schema":2790},"How to deploy Shopify themes with GitLab","Streamline your development workflow by configuring auto deployments for Shopify themes with GitLab pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683288/Blog/Hero%20Images/storefront.jpg","https://about.gitlab.com/blog/deploy-shopify-themes-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy Shopify themes with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Austin Regnery\"}],\n        \"datePublished\": \"2022-04-14\",\n      }",{"title":2786,"description":2787,"authors":2792,"heroImage":2788,"date":2793,"body":2794,"category":14,"tags":2795},[1741],"2022-04-14","\n[1.75 million sellers are using Shopify's eCommerce platform](https://backlinko.com/shopify-stores), and every one of these online stores has a codebase that lives somewhere. You may have encountered challenges scaling your development efforts at your organization while working within Shopify. Setting up a process for repeatable deployments with GitLab can keep everything streamlined and safe. No one wants something going live in production before it's ready.\n\nHere's a simple development flow you can replicate using GitLab CI/CD pipelines for Shopify theme deployments:\n\n1. Develop locally on a feature branch until you are happy with your local changes\n2. Merge your `feature` branch into your `main` branch → This will update the staging theme in Shopify\n3. When everything is ready to go live, create a new tag and push it to GitLab → The live theme will be updated automatically 🎉\n\nThis tutorial assumes you have set up a repository in a GitLab project.\n\n## 1. Add your variables\n\nFor security purposes, you don't want to store the credentials for your Shopify site in your configuration file. 
You can use variables in GitLab to handle that.\n\nUse the [ThemeKit CLI](https://shopify.github.io/themekit/configuration/) to retrieve all the available theme IDs from your Shopify store by entering this into your command line:\n\n```shell\ntheme get --list -p=[shopify-api-access-token] -s=[your-store.myshopify.com]\n```\n\n> **Help:** [Generate API credentials in Shopify](https://shopify.dev/apps/auth/basic-http#step-2-generate-api-credentials)\n\nOpen your project in GitLab, navigate to `/settings/ci_cd`, and open the variables section.\n\nAdd four unique variables with their corresponding keys and values:\n\n| Key | Value |\n| --- | ----- |\n| `STAGING_THEME_ID`     | [staging-theme-id-number]    |\n| `PRODUCTION_THEME_ID`  | [production-theme-id-number] |\n| `SHOP_WEB_ADDRESS`     | [your-store.myshopify.com]   |\n| `SHOPIFY_API_ACCESS_TOKEN` | [shopify-api-access-token]  |\n\n> **Note:** A masked variable will not show in CI logs, and a protected variable is only passed to pipelines running on protected branches or tags – both add an extra layer of security. If you choose to protect your variables, you need to make sure that your `main` branch and the tag `v*` wildcard are protected as well.\n\n## 2. Add a `config.yml` to your project repository\n\nThis file may already exist, but `config.yml` needs to contain the following to properly map the variables from step 1 to your Shopify themes for deployments.\n\n```yml\nstaging:\n  password: ${SHOPIFY_API_ACCESS_TOKEN}\n  theme_id: ${STAGING_THEME_ID}\n  store: ${SHOP_WEB_ADDRESS}\n\nproduction:\n  password: ${SHOPIFY_API_ACCESS_TOKEN}\n  theme_id: ${PRODUCTION_THEME_ID}\n  store: ${SHOP_WEB_ADDRESS}\n```\n\n## 3. Add a `.gitlab-ci.yml` file to your project\n\nNow set up your pipeline to run on specific triggers. Go to your local theme folder, create a `.gitlab-ci.yml` file at the project root, and add the snippet below. This snippet is the configuration for the CI pipeline.\n\n```yml\nimage: python:2\n\nstages:\n  - staging\n  - production\n\nstaging:\n  image: python:2\n  stage: staging\n  script:\n    - curl -s https://shopify.github.io/themekit/scripts/install.py | python\n    - theme deploy -e=staging\n  only:\n    variables:\n      - $CI_DEFAULT_BRANCH == $CI_COMMIT_BRANCH\n\nproduction:\n  image: python:2\n  stage: production\n  script:\n    - curl -s https://shopify.github.io/themekit/scripts/install.py | python\n    - theme deploy -e=production --allow-live\n  only:\n    - tags\n```\n\nIt has two stages: **staging** and **production**. Each will install the ThemeKit CLI first and then deploy the repository to the corresponding theme.\n\n## 4. Now push some changes to deploy\n\nAny code pushed to the `main` branch will trigger a deployment to the staging theme in Shopify.\n\n```\ngit commit -am \"commit message\"\ngit push\n```\n\nWhen you are ready to push changes to production, add a tag and push it.\n\n```\ngit tag -a \"v1.0.0\" -m \"First release to production from GitLab\"\ngit push --tags\n```\n\n> **Alternative option:** [Create a tag from GitLab](https://docs.gitlab.com/ee/user/project/releases/#create-a-release-in-the-tags-page)\n\nThat's it! You're now using CI to automate deployments from GitLab to your Shopify themes.
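\n\nAs a side note, newer GitLab versions can express the same triggers with `rules:` instead of `only:`. A minimal sketch of the equivalent conditions (assuming GitLab 13.0 or later; the install and deploy scripts are omitted):\n\n```yml\nstaging:\n  stage: staging\n  rules:\n    # Run only for pushes to the default branch (e.g., main).\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nproduction:\n  stage: production\n  rules:\n    # Run only when a tag is pushed.\n    - if: $CI_COMMIT_TAG\n```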
\n\nFurther refine this workflow by [incorporating merge request approvals](/blog/feature-highlight-merge-request-approvals/), [setting up merge trains](/blog/merge-trains-explained/), or learning more about [GitLab CI/CD pipelines](/blog/guide-to-ci-cd-pipelines/).\n\nBig thanks to Alex Gogl for their [blog](https://medium.com/@gogl.alex/how-to-deploy-shopify-themes-automatically-1ac17ee1229c). This `.gitlab-ci.yml` will appear as an available template when [merge request !52279](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/52279) is merged.\n\nCover image by [Artem Gavrysh](https://unsplash.com/@tmwd?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n",[726],{"slug":2797,"featured":6,"template":678},"deploy-shopify-themes-with-gitlab","content:en-us:blog:deploy-shopify-themes-with-gitlab.yml","Deploy Shopify Themes With Gitlab","en-us/blog/deploy-shopify-themes-with-gitlab.yml","en-us/blog/deploy-shopify-themes-with-gitlab",{"_path":2803,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2804,"content":2810,"config":2815,"_id":2817,"_type":16,"title":2818,"_source":17,"_file":2819,"_stem":2820,"_extension":20},"/en-us/blog/the-ultimate-guide-to-gitops-with-gitlab",{"title":2805,"description":2806,"ogTitle":2805,"ogDescription":2806,"noIndex":6,"ogImage":2807,"ogUrl":2808,"ogSiteName":692,"ogType":693,"canonicalUrls":2808,"schema":2809},"The ultimate guide to GitOps with GitLab","This eight-part tutorial series demonstrates how to use GitLab as a best-in-class GitOps tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670178/Blog/Hero%20Images/GitLab-Ops.png","https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to GitOps with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-04-07\",\n      }",{"title":2805,"description":2806,"authors":2811,"heroImage":2807,"date":2812,"body":2813,"category":14,"tags":2814},[2014],"2022-04-07","\n\nIt is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. [GitOps](/topics/gitops/) is an operational framework that takes DevOps best practices used for application development, such as version control, collaboration, compliance, and CI/CD tooling, and applies them to infrastructure automation. This series of easy-to-follow tutorials focuses on different user problems that can be solved by pairing GitOps with GitLab, including provisioning and managing a base infrastructure and deploying various third-party or custom applications on top of it.\n\nHere are 8 tutorials on how to do GitOps with GitLab:\n\n**1. [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)**\n\nThis tutorial sets the stage for what you will learn throughout the series, including the tech concepts you'll need to know.\n\n**2. [Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)**\n\nThis tutorial walks you through setting up the underlying infrastructure using GitLab and Terraform.\n\n**3. 
[Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)**\n\nThis tutorial demonstrates how to connect a Kubernetes cluster with GitLab for pull- and push-based deployments and easy security integrations.\n\n**4. [How to tackle secrets management](/blog/gitops-with-gitlab-secrets-management/)**\n\nThis tutorial builds on the previous tutorial to show you how to use a Kubernetes cluster connection to manage secrets within a cluster.\n\n**5. [The CI/CD tunnel](/blog/gitops-with-gitlab-using-ci-cd/)**\n\nThis tutorial introduces you to CI/CD tunnels and shows, step by step, how to access a Kubernetes cluster using GitLab CI/CD.\n\n**6. [Connecting GitLab with a Kubernetes cluster - Auto DevOps](/blog/gitops-with-gitlab-auto-devops/)**\n\nThis tutorial looks at how you can use Auto DevOps, with all its bells and whistles, to easily manage deployments.\n\n**7. [Connecting GitLab with a Kubernetes cluster for GitOps-style application delivery](/blog/gitops-with-gitlab/)**\n\nThis tutorial shows you how to connect an application project to a manifest project for controlled, GitOps-style deployments.\n\n**8. [Turn a GitLab agent for Kubernetes installation to manage itself](/blog/gitops-with-gitlab-manage-the-agent/)**\n\nThis tutorial is the culmination of the previous tutorials and teaches you how to make a GitLab agent for Kubernetes installation manage itself.\n\n\n**Read more about GitOps:**\n- [GitLab for GitOps](/solutions/gitops/)\n- [What is GitOps](/topics/gitops/)\n- [GitOps viewed as part of the Ops evolution](/blog/gitops-as-the-evolution-of-operations/)\n- [How to use a push-based approach for GitOps with GitLab scripting and variables](/blog/how-to-agentless-gitops-vars/)\n\n\n\n\n",[894,726,535],{"slug":2816,"featured":6,"template":678},"the-ultimate-guide-to-gitops-with-gitlab","content:en-us:blog:the-ultimate-guide-to-gitops-with-gitlab.yml","The Ultimate Guide To Gitops With Gitlab","en-us/blog/the-ultimate-guide-to-gitops-with-gitlab.yml","en-us/blog/the-ultimate-guide-to-gitops-with-gitlab",{"_path":2822,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2823,"content":2829,"config":2835,"_id":2837,"_type":16,"title":2838,"_source":17,"_file":2839,"_stem":2840,"_extension":20},"/en-us/blog/gitlab-top-devops-tooling-metrics-and-targets",{"title":2824,"description":2825,"ogTitle":2824,"ogDescription":2825,"noIndex":6,"ogImage":2826,"ogUrl":2827,"ogSiteName":692,"ogType":693,"canonicalUrls":2827,"schema":2828},"The top DevOps tooling metrics and targets at GitLab","Here is how we measure DevOps success and why we always try to look forward.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665635/Blog/Hero%20Images/blog-performance-metrics.jpg","https://about.gitlab.com/blog/gitlab-top-devops-tooling-metrics-and-targets","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The top DevOps tooling metrics and targets at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mek Stittri\"}],\n        \"datePublished\": \"2022-04-05\",\n      }",{"title":2824,"description":2825,"authors":2830,"heroImage":2826,"date":2832,"body":2833,"category":14,"tags":2834},[2831],"Mek Stittri","2022-04-05","\n\nA successful DevOps practice relies heavily on metrics. Here at GitLab, we use seven key DevOps metrics to measure engineering efficiency and productivity. 
Like many teams, we use industry-standard metrics, but in some cases, we approach this data with a unique GitLab point of view. Here’s the first in a multipart look at the DevOps metrics we at GitLab think are most critical for success. Compare your metrics and results with ours, and [let’s get a conversation started](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13202).\n\n## Master pipeline stability\n\nIt’s important to be able to measure the stability of the GitLab project’s master branch pipeline. This metric tells us how stable the main branch is, and it ensures engineers are checking out code that’s in good shape. [Merge trains](https://gitlab.com/gitlab-org/quality/team-tasks/-/issues/195) are key to this effort.\n\nOur target for [master pipeline stability](/handbook/engineering/quality/performance-indicators/#master-pipeline-stability) is above 95%.\n\n![master pipeline stability](https://about.gitlab.com/images/blogimages/dometrics1.png)\n\n## Review app deployment success rate\n\nAt GitLab we take [review apps](https://docs.gitlab.com/ee/ci/review_apps/) seriously. We measure their success rate so we can understand the stability of our first deployed environment after a code change. Review apps are spun up when a merge request is submitted. It’s important to monitor successful review app deployments because the review app is the first place where code is integrated and deployed as one unit. This metric ensures the codebase can be installed, tested, and made available for the team to preview their changes before merging into the master branch.\n\nOur target for [review application deployment success](/handbook/engineering/quality/performance-indicators/#review-app-deployment-success-rate) is above 99%.\n\n![review app deployment success](https://about.gitlab.com/images/blogimages/dometrics2.png)\n\n## Time to First Failure\n\nTime to First Failure (TtFF, pronounced “teuf”) measures how fast we provide feedback to engineers. This metric examines how long it takes from pipeline creation to the first actionable failed build. The idea is that if a commit is going to fail, it should fail fast, and the failure signal should get to the engineers as quickly as possible. The shorter the time to first failure, the faster the feedback loop, and the faster engineers can act to address those failures.\n\nOur [TtFF target](/handbook/engineering/quality/performance-indicators/#time-to-first-failure) is less than 15 minutes.\n\n![TtFF or Time to First Failure](https://about.gitlab.com/images/blogimages/dometrics3.png)\n\n## Open S1 bug age\n\nThis metric focuses on the age of open S1 bugs. Many organizations measure time to close bugs. At GitLab, we focus on the age of the bugs that remain. We structure the metric around work that is still open and can be acted on. If we only measured the time to close fixed defects, we might miss addressing older defects and unintentionally incentivize closing only newer defects. We like to look forward by asking ourselves “What’s left?” and “What can be done now?” rather than only looking backward at what’s already been done.\n\nOur target for [S1 open bug age](/handbook/engineering/quality/performance-indicators/#s1-oba) is under 100 days.\n\n![Open S1 bug age](https://about.gitlab.com/images/blogimages/dometrics4.png)\n\n## Open S2 bug age\n\nThis metric is similar to the open S1 bug age, but is focused on S2 bugs. 
Again, we measure the age of remaining open bugs rather than focusing on bugs that have already been closed.\n\nOur target for the [open S2 bug age](/handbook/engineering/quality/performance-indicators/#s2-oba) metric is below 300 days.\n\n![Open S2 bug age](https://about.gitlab.com/images/blogimages/dometrics5.png)\n\n## Merge request pipeline duration\n\nWhen a pipeline is started for a merge request, how long does it take to run? This metric focuses on the duration of merge request pipelines and their time efficiency. Within the total duration, we break the data down into multiple stages. The team then iterates on and improves the time efficiency of each stage of the pipeline. This is a key building block for improving GitLab’s code cycle time and efficiency, and it ensures the code is merged in a timely manner.\n\nOur target for this metric is below 45 minutes.\n\n![MR pipeline duration](https://about.gitlab.com/images/blogimages/dometrics6.png)\n\n## MR pipeline costs\n\nWe use this metric at GitLab to help us determine our merge request pipeline cost efficiency. We look at the total cost of the CI runner machines for MR pipelines, and then divide that figure by the number of merge requests. This helps us monitor cost while fine-tuning efficiency. Speed and cost move in different directions: You can increase resources to speed things up, but that comes at a cost. Monitoring this metric enables us to keep a balanced, healthy trade-off between optimizing for cost and optimizing for speed.\n\nOur target for the [MR pipeline costs](/handbook/engineering/quality/performance-indicators/#merge-requests-pipeline-cost) metric is below 7.50 per merge request.\n\n![MR pipeline costs](https://about.gitlab.com/images/blogimages/dometrics7.png)\n\n## What DevOps tooling metrics are most effective for your team?\n\nWe’d like to hear what you think of our choices and our targets, and what works, or doesn’t, for you. [Chime in here](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13202).\n",[894,915,1328],{"slug":2836,"featured":6,"template":678},"gitlab-top-devops-tooling-metrics-and-targets","content:en-us:blog:gitlab-top-devops-tooling-metrics-and-targets.yml","Gitlab Top Devops Tooling Metrics And Targets","en-us/blog/gitlab-top-devops-tooling-metrics-and-targets.yml","en-us/blog/gitlab-top-devops-tooling-metrics-and-targets",{"_path":2842,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2843,"content":2849,"config":2856,"_id":2858,"_type":16,"title":2859,"_source":17,"_file":2860,"_stem":2861,"_extension":20},"/en-us/blog/learn-python-with-pj-part-3",{"title":2844,"description":2845,"ogTitle":2844,"ogDescription":2845,"noIndex":6,"ogImage":2846,"ogUrl":2847,"ogSiteName":692,"ogType":693,"canonicalUrls":2847,"schema":2848},"Learn Python with Pj! Part 3 - Functions and strings","Pj shares his experiences learning how to program functions and strings.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664962/Blog/Hero%20Images/python.jpg","https://about.gitlab.com/blog/learn-python-with-pj-part-3","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn Python with Pj! 
Part 3 - Functions and strings\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-04-04\",\n      }",{"title":2844,"description":2845,"authors":2850,"heroImage":2846,"date":2852,"body":2853,"category":14,"tags":2854},[2851],"PJ Metz","2022-04-04","\n\n_This is the third installment in the Learn Python with Pj! series. Make sure to read [Part 1](/blog/learn-python-with-pj-part-1/) and [Part 2](/blog/learn-python-with-pj-part-2/)._\n\n\nIn learning Python, I’m happy to have found a language with a straightforward syntax that just seems to make sense. I don’t have to define a type; Python just knows. I don’t have to worry about `let` or `const` or `var` for different use cases; I just make the variable. I’m very glad I learned C# and JavaScript first, as those feel important to understanding exactly what is happening when I write code. In turn, I think it’s made Python easier for me, which is usually true when learning another programming language: Your second and third are easier to learn since your brain understands “programming logic” better now than when you made your first “Hello World”. This week we’re going to talk about what I’ve learned about functions and strings.\n\n## Functions\n\nFunctions are the backbone of any app you write. It’s an important step in learning any language to learn how to put a series of actions inside a single function that can be called later in the code. Python does this simply compared to other languages I’ve learned.\n\n```python\ndef my_first_function(arg1, arg2):\n  print(f\"Your input was {arg1} and {arg2}.\")\n\n# prints \"Your input was hilarious and unnecessary.\"\nmy_first_function(\"hilarious\", \"unnecessary\")\n```\n\nUsing the keyword `def` lets Python know that you’re about to write a function. Inside the parentheses, you put any parameters that must be included when calling the function. Some people use argument and parameter interchangeably, but technically, when defining a function, it’s a parameter, and when calling a function, it’s an argument. Either way, when defining the function, include the parameters that you’ll expect when the function is called later. Finally, put a colon and then move to the next line. All the code for the function to run is indented. Inside the function, you can run loops, logic, or even other functions. Let’s check out a slightly more complex use.\n\n```python\ndef halloween_horror_nights(days, link):\n    named = input(\"What is your name?\")\n    name = named.capitalize()\n    if isinstance(days, int):\n        if days == 0:\n            print(f\"Hello, {name}. We're ready to see you at HHN. {link}\")\n        elif days \u003C= 30:\n            print(f\"{name}, You have {days} days until the terror is home. {link}\")\n        elif days \u003C= 60:\n            print(f\"{name}, The horror comes home in {days} days. Join us in the dark. {link}\")\n        elif days \u003C 365 and days > 60:\n            print(f\"{name}, Patience is a virtue. You're {days} days away from the top rated Halloween event in the world. {link}\")\n        else:\n            print(f\"{name}, it can't be more than a year away. It's closer than you think... {link}\")\n    else:\n        print(\"Days must be an int\")\n\n\n# This will print \"{Name input by user}, The horror comes home in 56 days. Join us in the dark. 
https://orlando.halloweenhorrornights.com/site\"\nhalloween_horror_nights(56, \"https://orlando.halloweenhorrornights.com/site\")\n```\n\nFor context, Halloween Horror Nights in Orlando is my favorite event of the year. This function takes in a number of days and a link (meant to be the days until the event and a link to the HHN web page) and outputs a string that says how many days are left until the event. The string also includes a link to the web page, and the function asks for user input to personalize each string. The function `isinstance()` returns `True` if the first argument is an instance of the type given as the second argument; here it checks that `days` is an int to make sure the sentence makes sense.\n\nI really found functions in Python to be a lot easier than in other languages, though I still miss the curly brackets of C# and JavaScript. Additionally, the simplicity of `def` followed by the function name and any required parameters is really straightforward and makes reading the code easier. And since code is read more than it’s written, that makes Python pretty awesome in my book.\n\nI also used the f-string format for these print statements, and it’s still one of my favorite ways to concatenate. It feels easier than a lot of the other ways of inserting variables into a string in Python, and a little easier than the way it’s done in JavaScript, at least to me. Later in this article, I use a different method of including variables in a string, called `.format()`.\n\nMaking your own functions is important, but there are a bunch of built-in functions in Python. There are also methods, which are similar to functions but are associated with the objects of the class they’re assigned to. Let’s talk about some strings and some methods that come with them.\n\n## Strings\n\nI thought it was strange that I had a whole section on strings in my Codecademy Python curriculum, but I soon realized that it was giving me a lot of very useful methods to use on strings that seemed very versatile. The most interesting thing to me is that strings are objects and act like a list of characters. I’m not entirely sure how strings are treated in other languages, but this really struck me as a cool idea. You can even call specific characters using the same syntax you would for a list.\n\n```python\nspooky = \"Halloween Horror Nights is my favorite thing about Autumn.\"\n\n# the following prints \"l\" since it's the 3rd char in the string `spooky`\nprint(spooky[2])\n```\n\nOr you can use a for loop on a string.\n\n```python\n# This prints each letter on a new line and capitalizes it. The message now reads vertically in the output.\nfor letter in spooky:\n  print(letter.upper())\n```\n\n### String methods\n\nA few built-in methods exist for strings in Python, like `capitalize()` and `upper()`, two I used in the above examples. In addition to those, there are many more that can do things like remove the whitespace or noise from the beginning and end of a string, tell you the index of the first appearance of something, or join a list of strings into a single string. There are lots of great included methods. Here’s an exercise I took from Codecademy and changed the content to fit this article's theme.\n\n```python\n# Given a string that contains a ton of information separated by semicolons and commas. 
Each part is a haunted house name, a Universal Studios location, and the year the house appeared at the event.\nhhn_houses_location_year = \"Chucky;Japan;2016, Run;Orlando;2001, The Orfanage: Ashes to Ashes;Orlando;2010, The Real: Haunted Village;Japan;2021, The Undertaker: No Mercy;Hollywood;2000, Welcome to Silent Hill;Hollywood;2012, American Werewolf in London;Orlando;2013\"\n\n# this splits the string up into a list where each element of the list is the section separated by a comma\nhhn_houses_list = hhn_houses_location_year.split(\",\")\n\n# empty list for the next step\nhhn_houses_stripped = []\n\n# this strips any whitespace from each element in the list and adds it to the empty list from before\nfor house in hhn_houses_list:\n    hhn_houses_stripped.append(house.strip())\n\n# empty list for the next step\nhhn_house_details = []\n\n# the next few lines split the details into their own lists:\n# first, each house, with its details, is split along the semicolons to make a list of lists, with each house being its own element in the larger list\n# next, empty lists are made for each detail\n# finally, using index numbers, each detail is placed in its own list so all the houses, locations, and years are separated\nfor info in hhn_houses_stripped:\n    hhn_house_details.append(info.split(\";\"))\n\nhouse = []\nlocation = []\nyear = []\n\nfor stuff in hhn_house_details:\n    house.append(stuff[0])\n    location.append(stuff[1])\n    year.append(stuff[2])\n\n# loops through and, using .format(), prints a sentence about each house\nfor num in range(0, len(house)):\n  print(\"{} was located in {} for the {} event\".format(house[num], location[num], year[num]))\n```\n\nAs you can see, I am obsessed with Halloween Horror Nights… er, wait, that's not the point of the article. As you can see, Python’s built-in methods for strings can be pretty useful, especially if you end up with a bunch of data sitting around in unformatted strings. Next time, we’re going to talk about dictionaries and how they are used in Python! 
\n",[1508,2855,726],"growth",{"slug":2857,"featured":6,"template":678},"learn-python-with-pj-part-3","content:en-us:blog:learn-python-with-pj-part-3.yml","Learn Python With Pj Part 3","en-us/blog/learn-python-with-pj-part-3.yml","en-us/blog/learn-python-with-pj-part-3",{"_path":2863,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2864,"content":2869,"config":2875,"_id":2877,"_type":16,"title":2878,"_source":17,"_file":2879,"_stem":2880,"_extension":20},"/en-us/blog/gitops-with-gitlab-manage-the-agent",{"title":2865,"description":2866,"ogTitle":2865,"ogDescription":2866,"noIndex":6,"ogImage":2807,"ogUrl":2867,"ogSiteName":692,"ogType":693,"canonicalUrls":2867,"schema":2868},"Self-managing Kubernetes agent installation with GitOps","This is the eighth and last article in a series of tutorials on how to do GitOps with GitLab.","https://about.gitlab.com/blog/gitops-with-gitlab-manage-the-agent","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Turn a GitLab agent for Kubernetes installation to manage itself\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-03-30\",\n      }",{"title":2870,"description":2866,"authors":2871,"heroImage":2807,"date":2872,"body":2873,"category":14,"tags":2874},"GitOps with GitLab: Turn a GitLab agent for Kubernetes installation to manage itself",[2014],"2022-03-30","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nIn this article, we will build upon the first few articles, and will turn a GitLab agent for Kubernetes installation to manage itself. This is highly recommended for production usage as it puts your `agentk` deployment under your GitOps project, and enables flawless and simple upgrades.\n\n## Prerequisites\n\nThis article builds on a few previous articles from this series and makes the following assumptions:\n\n- You have [an agent connection set up using the `kpt` based method](/blog/gitops-with-gitlab-connecting-the-cluster/).\n- You have [set up Bitnami's Sealed secrets](/blog/gitops-with-gitlab-secrets-management/).\n- You understand [how to use `kustomize` with the agent](/blog/gitops-with-gitlab/).\n\n## The goal\n\nThe goal of this tutorial is to manage a GitLab agent for Kubernetes deployment using that given agent. This has several benefits, including: \n\n- By turning the agent to manage itself, the agent configuration and deployment is managed in code. As a result, all the code-oriented tools, including Merge Requests, Approvals, and branching are there to support your processes and policies.\n- Managing a fleet of agent installations in code enables simple upgrades of the deployments.\n\n### Upgrading GitLab and the GitLab agent for Kubernetes\n\nA single GitLab instance might have dozens of agent connections. How should you upgrade all these deployments in a coordinated way? 
Turning everything into code simplifies the upgrade process a lot.\n\nWe have the GitLab - agent [version compatibility documented](https://docs.gitlab.com/ee/user/clusters/agent/install/index.html#upgrades-and-version-compatibility). The recommended approach is to first upgrade GitLab together with `KAS`, the GitLab-side component of the connection, and then upgrade all the `agentk` deployments. \n\nIf you manage the `agentk` deployments in code, the upgrade only requires bumping the version number in code, and the `agentk` instances will take care of upgrading themselves.\n\n## Turning an agent installation into one that manages itself\n\nLet's do a quick recap and an overview of how we will use the tools.\n\nWe use `kpt` to check out tagged `agentk` deployment manifests. As the manifests are a set of `kustomize` layers, we can extend them with our own overlays if needed, or just customize the setup per our requirements. The agent connection requires a token to authenticate with GitLab. We can use Bitnami's Sealed Secrets to store an encrypted secret in the repo.\n\nAll the above code can be put under version control safely. Moreover, we can use GitLab CI/CD to hydrate the `kustomize` package into vanilla Kubernetes manifests that the agent can deal with.\n\nLet's see the above in action!\n\n### Kustomize layer with encrypted secret\n\nBased on the previous articles, we have the `kpt` package checked out under `packages/gitlab-agent`. We would like to store the vanilla Kubernetes manifests in the repository. We can run `kustomize build packages/gitlab-agent/cluster > kubernetes/gitlab-agent.yaml` to get the manifests, but this will include the unencrypted authentication token too.\n\nTo never output the unencrypted token, we should turn it into a sealed secret.\n\nNavigate to the `gitlab-agent` Terraform project, and create a Kubernetes secret from the token by running `terraform output -raw token_secret | kubectl create secret generic gitlab-agent-token -n gitlab-agent --dry-run=client --type=Opaque --from-file=token=/dev/stdin -o yaml > ../../ignored/gitlab-agent-token.yaml`. If you followed the instructions in the previous articles, the files under the `ignored` directory are never committed to `git`.\n\nWe will turn this unencrypted secret into a sealed secret. As the secret will already exist in the cluster, we should instruct the Bitnami Sealed Secret controller to pull it under its management. Moreover, as kustomize appends a hash to every secret name, we should enable renaming the secret within the namespace. We can achieve these by adding two annotations to the unencrypted secret object.\n\nAdd the following annotations to `ignored/gitlab-agent-token.yaml`:\n\n```yaml\nannotations:\n  sealedsecrets.bitnami.com/managed: \"true\"\n  sealedsecrets.bitnami.com/namespace-wide: \"true\"\n```\n\nNext, we should create an encrypted secret from the ignored, unencrypted one by running `bin/seal-secret ignored/gitlab-agent-token.yaml > packages/gitlab-agent/sealed-secret` in the root of our project. This creates the encrypted secret under `packages/gitlab-agent/sealed-secret/SealedSecret.gitlab-agent-token.yaml`. Now, we need a kustomize layer that will use this secret instead of the original one that came with `kpt`. 
Let's create the following files around the encrypted secret:\n\n- Create `packages/gitlab-agent/sealed-secret/kustomization.yaml` as:\n\n```yaml\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n- ../base\n- SealedSecret.gitlab-agent-token.yaml\ncomponents:\n- ../cluster/components/gitops-read-all\n- ../cluster/components/gitops-write-all\n- ../cluster/components/cilium-alert-read\nconfigurations:\n- configuration/sealed-secret-config.yaml\nsecretGenerator:\n- name: gitlab-agent-token\n  behavior: replace\n  type: Opaque\n  namespace: gitlab-agent\n  options:\n    annotations:\n      sealedsecrets.bitnami.com/managed: \"true\"\n      sealedsecrets.bitnami.com/namespace-wide: \"true\"\n```\n\n- Create `packages/gitlab-agent/sealed-secret/configuration/sealed-secret-config.yaml` as:\n\n```yaml\nnameReference:\n- kind: Secret\n  fieldSpecs:\n  - kind: SealedSecret\n    path: metadata/name\n  - kind: SealedSecret\n    path: spec/template/metadata/name\n```\n\nThis configuration enables us to reference the name of the Sealed Secret in the `secretGenerator`.\n\nWe created a new `kustomize` overlay that builds on the `base` and `cluster` layers, but uses the sealed secret. We can hydrate this into vanilla manifests using `kustomize build packages/gitlab-agent/sealed-secret > kubernetes/gitlab-agent.yaml`. The output does not include any unencrypted, sensitive data. As a result, we can commit it freely using `git commit`.\n\n### Have the agent adopt itself\n\nRight now, the agent configuration file looks similar to this: \n\n```yaml\ngitops:\n  # Manifest projects are watched by the agent. Whenever a project changes,\n  # GitLab deploys the changes using the agent.\n  manifest_projects:\n  - id: path/to/your/project\n    default_namespace: gitlab-agent\n    # Paths inside of the repository to scan for manifest files.\n    # Directories with names starting with a dot are ignored.\n    paths:\n    - glob: 'kubernetes/test_config.yaml'\n    - glob: 'kubernetes/**/*.yaml'\n```\n\nIf we pushed the previously hydrated manifests, `agentk` would fail to apply them, complaining about missing inventories. We can easily fix this by temporarily setting a looser inventory policy:\n\n```yaml\ngitops:\n  # Manifest projects are watched by the agent. Whenever a project changes,\n  # GitLab deploys the changes using the agent.\n  manifest_projects:\n  - id: path/to/your/project\n    default_namespace: gitlab-agent\n    inventory_policy: adopt_all\n    # Paths inside of the repository to scan for manifest files.\n    # Directories with names starting with a dot are ignored.\n    paths:\n    - glob: 'kubernetes/test_config.yaml'\n    - glob: 'kubernetes/**/*.yaml'\n```\n\nWith the inventory policy configured, we can commit and push our changes to GitLab. The agent will see the new configuration and resources, and will apply them to the cluster. From now on, you can change the code in the repository, push it to git, and the changes will be automatically applied to your cluster.\n\n#### What are inventory policies?\n\nThe GitLab agent for Kubernetes keeps track of the resources it manages using so-called inventory objects. In technical terms, an inventory object is just a `ConfigMap` with a unique label. Whenever the agent sees an object that it should manage, it applies the same label. This way, every agent can easily find the resources that it manages.
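\n\nTo make this concrete, here is a minimal sketch of what such an inventory object could look like. The label key, name, and data entry below are illustrative assumptions, not output from a real cluster:\n\n```yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: inventory-77a1bc2d        # hypothetical name\n  namespace: gitlab-agent\n  labels:\n    # the unique inventory label the agent matches on (key shown for illustration)\n    cli-utils.sigs.k8s.io/inventory-id: gitlab-agent\ndata:\n  # one entry per managed object, identifying it by namespace, name, group, and kind\n  gitlab-agent_gitlab-agent_apps_Deployment: \"\"\n```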
\n\nYou can read more about the possible [inventory policy configurations in the documentation](https://docs.gitlab.com/ee/user/infrastructure/clusters/deploy/inventory_object.html).\n\n#### A word about RBAC\n\nDepending on the authorization rights given to the `agentk` deployment, not every change may be possible. For example, if you would like to create a new `ClusterRole` and `ClusterRoleBinding` in a new `kustomize` overlay and apply that with the agent, it will fail if your current role-based access control (RBAC) configuration does not allow your `agentk` deployment to create these resources. In this case, you should either grant broader rights to your `agentk` service account first, or apply the changes manually from your command line.\n\n### Automatic hydration\n\nNow, if you want to change something in your agent deployment, you need to take two actions:\n\n- change the code in the `kpt` package\n- run `kustomize build` to hydrate the results\n\nLet's automate the second step so you can focus on your main job. Following the setup of [a GitOps-style Auto DevOps pipeline](/blog/gitops-with-gitlab/#hydrating-the-manifests), we need to extend the `hydrate-packages` job:\n\n```yaml\nhydrate-packages:\n  ...\n  script:\n  - mkdir -p new_manifests\n  ...\n  - kustomize build packages/gitlab-agent/sealed-secret > new_manifests/gitlab-agent.yaml\n```\n\nWe can re-use all the other automation as presented in the previous articles.\n\n## How to upgrade `agentk`?\n\nJust to provide a practical example, let's see how we can use the above setup to easily upgrade an `agentk` deployment to a newer version.\n\nBy running `kustomize cfg set packages/gitlab-agent agent-version v14.9.1` we set the intended `agentk` version to `v14.9.1`. You can commit and push this change to git, then sit back in your chair and watch the changes being rolled out across your clusters. You can point several agent configurations at the same `kubernetes/gitlab-agent.yaml` manifest, and upgrade all of them at once.
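\n\nPut together, the whole upgrade amounts to a couple of commands. A minimal sketch, assuming the repository layout used throughout this series:\n\n```bash\n# set the intended agentk version in the kpt package\nkustomize cfg set packages/gitlab-agent agent-version v14.9.1\n\n# commit and push; CI re-hydrates the manifests and the agent applies them\ngit add packages/gitlab-agent\ngit commit -m \"Upgrade agentk to v14.9.1\"\ngit push\n```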
\n\n## Recap\n\nIn this article we have seen:\n\n- how to turn an agent deployment into one that manages itself\n- how to extend the default `kpt` project with a custom `kustomize` overlay to customize the `agentk` deployment\n- how to easily upgrade a set of `agentk` deployments\n- how to pull existing objects under the agent's management using inventory policies\n\n_Note: This is the final installment in this series of [how to do GitOps with GitLab](/blog/the-ultimate-guide-to-gitops-with-gitlab)._\n\n\n",[535,1002,726],{"slug":2876,"featured":6,"template":678},"gitops-with-gitlab-manage-the-agent","content:en-us:blog:gitops-with-gitlab-manage-the-agent.yml","Gitops With Gitlab Manage The Agent","en-us/blog/gitops-with-gitlab-manage-the-agent.yml","en-us/blog/gitops-with-gitlab-manage-the-agent",{"_path":2882,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2883,"content":2888,"config":2894,"_id":2896,"_type":16,"title":2897,"_source":17,"_file":2898,"_stem":2899,"_extension":20},"/en-us/blog/gitlab-and-testify-sec-witness-alliance",{"title":2884,"description":2885,"ogTitle":2884,"ogDescription":2885,"noIndex":6,"ogImage":2478,"ogUrl":2886,"ogSiteName":692,"ogType":693,"canonicalUrls":2886,"schema":2887},"How to enhance supply chain security with GitLab and TestifySec","New alliance partner TestifySec makes Witness available in GitLab","https://about.gitlab.com/blog/gitlab-and-testify-sec-witness-alliance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to enhance supply chain security with GitLab and TestifySec\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nicole Schwartz\"}],\n        \"datePublished\": \"2022-03-16\",\n      }",{"title":2884,"description":2885,"authors":2889,"heroImage":2478,"date":2891,"body":2892,"category":14,"tags":2893},[2890],"Nicole Schwartz","2022-03-16","\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\nToday, GitLab is excited to announce that our partner [TestifySec](https://www.testifysec.com/) has integrated their [Witness](https://github.com/testifysec/witness) open-source tool into GitLab, allowing us to take another step along our [Secure Software Supply Chain Direction](https://about.gitlab.com/direction/supply-chain/).\n\n## Secure software supply chain \n\nAn emerging concern in the software development space is securing your software supply chain. An important element of this is documenting the entire supply chain and development process by creating a chain of custody that starts at code creation and runs through build, test, packaging, and deployment. One important element of this chain of custody is commonly referred to as a Software Bill of Materials ([SBOM](https://www.ntia.gov/SBOM)). There are also frameworks, such as [SLSA](https://slsa.dev/), which collect additional elements about the process. 
Together, these documents are becoming critical components in satisfying regulated industry requirements.\n\nAs a DevOps platform, GitLab has many opportunities to rise to the challenge of creating transparency around software components and artifacts. \n\n## TestifySec Witness\n\nRecent compromises and attacks on the software supply chain, such as Sunburst and Log4Shell, highlight the need for a new way of securing CI systems and their artifacts. This is why [TestifySec](https://www.testifysec.com/) created [Witness](https://github.com/testifysec/witness).\n\nCI systems are an incredible source of data. Many CI systems, such as GitLab, along with their cloud infrastructure, provide tokens with non-falsifiable data. Witness verifies and records this data, along with inputs and outputs from a CI process, in a verifiable and standardized way.\n\nIn current-generation CI systems, we restrict the release of artifacts based on the pass or failure of build steps. However, most organizations have no standardized way to leverage the metadata available during the CI process in order to inform policy decisions in production environments.\n\nIn next-generation CI systems, data collected during the CI process is not thrown away. Instead, we make this data available to security administrators for use at any policy enforcement point. With [Witness](https://github.com/testifysec/witness), you shift security left while communicating risk right.\n\nOnce an artifact is built, it becomes difficult to understand where it was built. Most major cloud providers provide some sort of identity mechanism to verify the instance identity. On AWS, this is called the Instance Metadata Service. The data available in this API is verifiable and is a perfect data structure for a Witness attestation.\n\nWitness records AWS identity metadata and cryptographically links it to the build artifact and any other events in that CI process. 
\n\nYou can [see the demo](https://gitlab.com/testifysec/demos/witness-demo).\n\nGitLab and TestifySec will be enhancing our features around this as time goes on - keep an eye out for more!\n\nRead more about GitLab's [Secure Software Supply Chain Direction](https://about.gitlab.com/direction/supply-chain/).\n",[1307,1347,727],{"slug":2895,"featured":6,"template":678},"gitlab-and-testify-sec-witness-alliance","content:en-us:blog:gitlab-and-testify-sec-witness-alliance.yml","Gitlab And Testify Sec Witness Alliance","en-us/blog/gitlab-and-testify-sec-witness-alliance.yml","en-us/blog/gitlab-and-testify-sec-witness-alliance",{"_path":2901,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2902,"content":2907,"config":2913,"_id":2915,"_type":16,"title":2916,"_source":17,"_file":2917,"_stem":2918,"_extension":20},"/en-us/blog/publishing-obsidian-notes-with-gitlab-pages",{"title":2903,"description":2904,"ogTitle":2903,"ogDescription":2904,"noIndex":6,"ogImage":2478,"ogUrl":2905,"ogSiteName":692,"ogType":693,"canonicalUrls":2905,"schema":2906},"Publishing Obsidian.md notes with GitLab Pages","How to publish your Obsidian.md documents to a GitLab Pages site","https://about.gitlab.com/blog/publishing-obsidian-notes-with-gitlab-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Publishing Obsidian.md notes with GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Scott Hampton\"}],\n        \"datePublished\": \"2022-03-15\",\n      }",{"title":2903,"description":2904,"authors":2908,"heroImage":2478,"date":2910,"body":2911,"category":14,"tags":2912},[2909],"Scott Hampton","2022-03-15","\n\n[Obsidian.md](https://obsidian.md) is a \"knowledge base\" application that uses plain text Markdown files stored in a local folder to\norganize your notes. The product has been growing in popularity, partly because of how extensible it is. There are a\nlot of community-built plugins to help users configure the application to support their specific workflow. There are\nmany people who use Obsidian to write their blog posts. [Obsidian offers a paid service to publish your notes directly](https://obsidian.md/publish),\nwhich is fully compatible with the features Obsidian offers. I suggest you support the Obsidian developers if their product\nworks for you. If you are looking for an alternative way to publish, this blog post provides a tutorial for how to publish your notes using GitLab\nPages.\n\nYou can find an Obsidian.md example in [this demonstration project](https://gitlab.com/gitlab-org/frontend/playground/obsidian-and-gitlab-pages-demo)\nwhich deploys [a GitLab Pages site](https://gitlab-org.gitlab.io/frontend/playground/obsidian-and-gitlab-pages-demo/). \n\n## What is Obsidian markdown?\n\nObsidian is a markdown-based system that incorporates tags, plugins, and backlinks to create an easy-to-use tool. It makes it possible for you to use symbols inside the text that are interpreted as text formatting. This [link](https://www.markdownguide.org/cheat-sheet/) is a cheat sheet of all the markdown syntax elements.\n\n### Benefits of Obsidian.md\n\nPerhaps the most significant benefit of Obsidian markdown (md) is its simple, straightforward design and the excellent support provided. It is also extensible, with plenty of community plugins available. \n\nThere is no proprietary formatting or encoding. 
This gives you greater control over how you back up files and manage change tracking.\n\nObsidian doesn't support git right out of the box; it requires a community plugin called Obsidian Git. However, once the plugin is installed, “you end up with the greatest change tracking/archiving tool at your disposal,” one user [raves](https://www.faesel.com/blog/why-every-developer-needs-to-use-obsidian).\n\n### How is Obsidian.md different from other markdown languages?\n\nObsidian markdown [differs from other markdown editors](https://cylab.be/blog/149/what-is-obsidianmd-and-why-you-should-use-it) in that it uses the “Linked Thought” feature, which refers to a group of note-taking applications that allow you to link thoughts and notes together seamlessly. Because it is based on the [Markdown language](https://en.wikipedia.org/wiki/Markdown), it is lightweight. The tool expands on the markdown language with additional functionality, such as creating links between files, offering \"hover over preview\" of links, and easy inclusion and management of sources.\n\nFor example, Obsidian lets you hover over any links added to a document and see a small preview of what the link refers to. You just need to position your mouse over the \"Format your notes\" link.\n\n### Some notable features of Obsidian.md\n\nThere’s a visually striking graph view that acts as a map of all your files stored in Obsidian. There is also a markdown format importer that can find and replace certain Markdown syntax elements in your files, and support for [math and diagram](https://publish.obsidian.md/help/How+to/Format+your+notes) syntax.\n\nAlso noteworthy is that Obsidian makes it easy to publish notes online and it stores all of your files in plaintext markdown files.\n\nObsidian supports CommonMark and GitHub Flavored Markdown (GFM) so you can embed notes and other files. It stores data in folders of markdown files so you can access your notes with other text editors or markdown apps. Obsidian also lets you open existing folders of markdown files.\n\n## Is Obsidian good for notes?\n\nObsidian is a very capable, free note-taking app (with advanced, paid tiers available as well). It touts itself as a [“second brain”](https://obsidian.md/) that is good for creating a knowledge base, editing markdown files, and linking notes together. It is designed for taking notes quickly and is easy to use: You just open the app, create a new note, and start typing.\n\nIt works across multiple platforms, including Windows, iOS, Android and Linux.\n\nObsidian has been called the [“most advanced note-taking app.”](https://deskoflawyer.com/secure-note-taking-apps/)\n\n## Setting up Obsidian notes\n\nOnce you download the app, you will see the main Obsidian window, which has the different options on the left, then the folder/files panel, and the composition area where you can create and edit your notes.\n\nThere are four icons on the left side: collapse panel, open quick switcher, open graph view, and open markdown importer. The collapse panel shows (or hides) the left panel. When you tap the open quick switcher button, it brings up a text box where you can begin to type. The open graph view shows a graph listing the connections each page has. The open markdown importer lets you import markdown files into Obsidian from other applications.\n\nYou’ll also see three buttons: \n\n1. Open another vault \n2. Help\n3. 
Settings\n\nThe vault refers to a collection of notes that you can open or create.\n\nYou have the option of either creating a note directly or creating a note via a link. In the former instance, in the folder panel, click on the “new note” button or use the keyboard shortcut: Ctrl+N on Windows or Cmd+N on Mac. Now you’ve created a new note.\n\nAn interesting time-saving feature is that you can create a note via a link and assign a name to that new note. You have to click on the link to actually create it.\n\nYou can find a helpful guide [here](https://www.sitepoint.com/obsidian-beginner-guide/).\n\n## Organizing an Obsidian note using folders\n\nWhen you begin using Obsidian you have to designate where you want to keep your notes. If you already have your notes in markdown format in a folder, you would choose the “open folder as vault” option. Otherwise, you can create a new vault and choose a location to store your notes.\n\nYou can drag and drop notes to move them around. There are three icons at the top of the pane that allow you to create a new note, make a new folder, or change the sorting order.\n\nObsidian has a powerful search feature that checks the content of your notes and returns all results very quickly. Access it by clicking on the magnifying glass icon at the top to begin a search of your notes.\n\nYou’ll already be in editor mode by default when you open Obsidian and you can edit your notes or write new ones. All markdown syntax is visible in this mode. Press Ctrl + E to switch to preview mode, and the syntax will disappear and the note will appear formatted.\n\nIf you type a hashtag before a word, Obsidian will detect it and assign it to the note, regardless of where it is in your text.\n\n## Get going with Obsidian.md\n\n[Obsidian.md](https://obsidian.md), at its core, is an application that helps manage your markdown files. You can download the application\nvia their site and create a \"workspace\" folder when you first start the application. When using the application, all of your notes\nwill be created in the folder you choose as your \"workspace\".\n\n![Obsidian application](https://about.gitlab.com/images/blogimages/obsidian_md.png){: .shadow}\n\n### Workspace file structure\n\nInside your Obsidian workspace, you can have any number of folders and markdown files. When you open a folder in Obsidian as your \"workspace\",\nObsidian will automatically add a folder `.obsidian`, which contains your workspace configuration such as application styles and plugins.\nA basic workspace file structure could look something like this:\n\n```\n.\n└── workspace_folder/\n    ├── Other pages/\n    │   └── Another page.md\n    ├── .obsidian/\n    └── index.md\n```\n\n`index.md`\n```markdown\n# Home\n\nThis is a basic home page, and a link to another page in my documents.\n\nSee [[Another page]] - note that this link uses wikilinks which Obsidian uses to help you easily link to other notes in your workspace.\n```\n\n`Other pages/Another page.md`\n```markdown\n# Another page\n\nThis is another page besides the home page.\n```\n\n## Generating a static site to host your notes\n\nIn order to publish your notes to GitLab Pages, you need to create a static site to show and navigate your notes.\nThere are several open source tools that generate static sites from Markdown documents. 
After experimenting\nwith a few, I found [MkDocs](https://www.mkdocs.org/) to be the easiest and most compatible with Obsidian.\n\nIf you would like to use MkDocs locally, you can install it with `pip install mkdocs`\n(Python and [pip as package manager](https://pypi.org/project/pip/) are required).\nThis is not necessary, though, because in this tutorial we'll utilize GitLab CI pipelines to install MkDocs and build our site.\n\nThere are two small steps you need to take in order to get your existing Obsidian notes working with MkDocs.\n\n### File structure\n\nAll files that are not your workspace notes will be created outside of your workspace folder. The following folder structure is\nhow the final demo project is going to look.\n\n```\n.\n├── wiki/\n│   ├── .obsidian/\n│   └── index.md\n├── .gitlab-ci.yml\n├── mkdocs.yml\n└── requirements.txt\n```\n\n - `wiki/` - this is your Obsidian workspace folder\n - `.obsidian` - the application configuration folder Obsidian uses for your workspace. This will not affect the site.\n - `index.md` - MkDocs looks for `index.md` in your workspace folder to use as your site's home page.\n - `.gitlab-ci.yml` - the GitLab CI configuration file used to deploy your site.\n - `mkdocs.yml` - the MkDocs configuration file used to build and customize your site.\n - `requirements.txt` - this file defines the Python package dependencies for MkDocs.\n\n### Basic MkDocs Configuration\n\nYou'll need to create a configuration file `mkdocs.yml` for MkDocs to know how you would like your site to look.\nHere are the first four lines we need to configure our notes.\n\n```yaml\nsite_name: My Obsidian Notes\nsite_url: https://group-name.gitlab.io/repo-name\nsite_dir: public\ndocs_dir: ./wiki\n```\n\n- `site_name` - is what will be used as the main title for the website.\n- `site_url` - is used as the \"canonical URL\" of the site. You will need to use [the default URL provided by GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/getting_started_part_one.html#gitlab-pages-default-domain-names) or your custom domain here.\n- `site_dir` - GitLab Pages requires HTML source code to be contained in a `public` folder. This setting tells MkDocs to put the generated files in the `public` folder.\n- `docs_dir` - this is the relative path to your workspace folder. I like to name mine `wiki` because it's my personal wikipedia. You can name this folder whatever you want.\n\nWe'll come back to this configuration file later to add more custom styles to your site.\n\n## Configuring GitLab CI\n\nWe need to configure a GitLab CI job to install MkDocs and build the website based on our Obsidian notes. The following\n`.gitlab-ci.yml` file has the basic setup for this:\n\n```yaml\nimage: python:3.8-slim\n\npages:\n  stage: deploy\n  script:\n    # Install all of the python packages for mkdocs\n    - pip install -r requirements.txt\n    # Build the site using mkdocs\n    # --strict aborts the build on any warnings\n    # --verbose enables verbose output so that it's easier to see what mkdocs is doing\n    # neither --strict nor --verbose are necessary if you choose not to use them\n    - mkdocs build --strict --verbose\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n```\n\nThis job will only run when a change is made to the default branch (`main` in this case).\n\n### Python Packages\n\nNote the line `pip install -r requirements.txt` in the above `.gitlab-ci.yml` file. This line installs MkDocs and any\nadditional plugins you use to customize your site. 
You'll need to create a `requirements.txt` file for this script to work:\n\n```text\n# Documentation static site generator & deployment tool\nmkdocs>=1.1.2\n```\n\nWe'll come back to this `requirements.txt` file to add a couple more packages to customize our site later.\n\n## Customizing your site\n\nOne of the benefits of using MkDocs is that it has a lot of extensions you can add on to customize your site. You can\nchange the theme of the site, which adjusts the colors and layout. You can also add extensions that improve how your\nmarkdown notes are displayed and interacted with on the site.\n\n### Theme\n\nMkDocs includes two built-in themes (`mkdocs` and `readthedocs`), [as documented on their website](https://www.mkdocs.org/user-guide/choosing-your-theme/).\nThere are also a lot of [community built themes](https://github.com/mkdocs/mkdocs/wiki/MkDocs-Themes) you can search through and choose to use.\nMy current favorite theme is [Material](https://github.com/mkdocs/mkdocs/wiki/MkDocs-Themes#material-for-mkdocs-). You can install it by adding it to our `requirements.txt` and choosing\nit as your theme in the `mkdocs.yml` configuration file, or if you are installing it locally you can install it with `pip install mkdocs-material`.\n\n`requirements.txt`\n```text\n# Material theme\nmkdocs-material>=8.1.7\n```\n\n`mkdocs.yml`\n```yaml\ntheme:\n  name: material\n  palette:\n    scheme: slate\n```\n\nI have chosen the `slate` scheme for the material theme, which makes it darker. You can choose more configuration options\nbased on [their website documentation](https://squidfunk.github.io/mkdocs-material/setup/changing-the-colors/).\n\n### Extensions\n\nMkDocs includes [built-in extensions](https://www.mkdocs.org/user-guide/configuration/#markdown_extensions) that you can add to your `mkdocs.yml` configuration file. The\n[Material](https://github.com/mkdocs/mkdocs/wiki/MkDocs-Themes#material-for-mkdocs-) theme package also comes with many more extensions that we can use. Below are some of my favorites\nfor working with Obsidian:\n\n```yaml\n# Extensions\nmarkdown_extensions:\n  - footnotes\n  - attr_list\n  - pymdownx.highlight\n  - pymdownx.superfences\n  - pymdownx.details\n  - pymdownx.magiclink\n  - pymdownx.tasklist\n  - pymdownx.emoji\n  - admonition\n  - toc:\n      permalink: true\n```\n\n- `footnotes` - adds the ability to define inline footnotes, which are then rendered below all Markdown content of a document. [See documentation here](https://squidfunk.github.io/mkdocs-material/reference/footnotes/).\n- `attr_list` - allows you to add HTML attributes and CSS classes to almost every Markdown inline and block-level element with special syntax. [See documentation here](https://squidfunk.github.io/mkdocs-material/setup/extensions/python-markdown/#attribute-lists).\n- `pymdownx.highlight` - adds support for syntax highlighting of code blocks. [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/highlight/).\n- `pymdownx.superfences` - allows for arbitrary nesting of code and content blocks inside each other. [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/superfences/).\n- `pymdownx.details` - allows for creating collapsible content blocks. [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/details/).\n- `pymdownx.magiclink` - provides a number of useful link-related features such as auto-linking URLs and emails. 
[See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/magiclink/).\n- `pymdownx.tasklist` - adds support for tasklist syntax. [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/tasklist/).\n- `pymdownx.emoji` - adds support for inserting emoji via simple short names enclosed within colons (`:short_name:`). [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/emoji/).\n- `admonition` - allows you to create \"callouts\" in your documentation. [See documentation here](https://squidfunk.github.io/mkdocs-material/reference/admonitions/).\n- `toc:permalink` - adds a table of contents to your page based on your markdown document, and ensures each link is a permanent link that can be reused. [See documentation here](https://python-markdown.github.io/extensions/toc/).\n\n### Plugins\n\nMkDocs also has a community of plugins that add more features when building your site. MkDocs includes some plugins by default that you can use in the configuration file, but in order to use community plugins you have to add them to the\n`requirements.txt` file to be installed as packages. The following two plugins are ones that I've found useful, but you\ncan look at [the list of community plugins here](https://github.com/mkdocs/mkdocs/wiki/MkDocs-Plugins):\n\n```yaml\nplugins:\n  - search\n  - roamlinks\n```\n\n- `search` - provides a search bar at the top of your site to easily search your documents. [See documentation here](https://www.mkdocs.org/user-guide/configuration/#search).\n- `roamlinks` - adds support for Obsidian's wikilinks feature. [See documentation here](https://github.com/Jackiexiao/mkdocs-roamlinks-plugin).\n\n`requirements.txt`\n```text\n# Wikilinks support\nmkdocs-roamlinks-plugin>=0.1.3\n```\n\nIf installing locally, you can install roamlinks with `pip install mkdocs-roamlinks-plugin`.\n\n## Combining it all together\n\nAfter all of the above work is done, you should have a file structure that looks like this:\n\n```\n.\n├── wiki/\n│   ├── .obsidian/\n│   └── index.md\n├── .gitlab-ci.yml\n├── mkdocs.yml\n└── requirements.txt\n```\n\nHere are the contents of the three main files that you've been editing:\n\n`.gitlab-ci.yml`\n```yaml\nimage: python:3.8-slim\n\npages:\n  stage: deploy\n  script:\n    - pip install -r requirements.txt\n    - mkdocs build --strict --verbose\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n```\n\n`mkdocs.yml`\n```yaml\nsite_name: My Obsidian Notes\nsite_url: https://group-name.gitlab.io/repo-name\nsite_dir: public\ndocs_dir: ./wiki\n\ntheme:\n  name: material\n  palette:\n    scheme: slate\n\n# Extensions\nmarkdown_extensions:\n  - footnotes\n  - attr_list\n  - pymdownx.highlight\n  - pymdownx.superfences\n  - pymdownx.details\n  - pymdownx.magiclink\n  - pymdownx.tasklist\n  - pymdownx.emoji\n  - admonition\n  - toc:\n      permalink: true\n\nplugins:\n  - search\n  - roamlinks\n```\n\n`requirements.txt`\n```text\n# Documentation static site generator & deployment tool\nmkdocs>=1.1.2\n\n# Material theme\nmkdocs-material>=8.1.7\n\n# Wikilinks support\nmkdocs-roamlinks-plugin>=0.1.3\n```\n\nNow that your files are all finished, the last step is to push your changes to your GitLab repository and wait for your pipeline\nto finish. Once finished, you can go to [your default domain provided by GitLab](https://docs.gitlab.com/ee/user/project/pages/getting_started_part_one.html#gitlab-pages-default-domain-names) or you can\n[configure GitLab Pages to use a custom domain](https://docs.gitlab.com/ee/administration/pages/index.html).
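\n\nIf you want to iterate without waiting for CI, you can also preview the site locally before pushing. A quick sanity check, assuming Python and pip are installed as mentioned earlier:\n\n```bash\n# install MkDocs, the Material theme, and the roamlinks plugin locally\npip install -r requirements.txt\n\n# serve the site at http://127.0.0.1:8000 with live reload\nmkdocs serve\n```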
\n\nHere's a screenshot of the demonstration site created in this tutorial:\n\n![Obsidian application](https://about.gitlab.com/images/blogimages/obsidian_mkdocs_site.png){: .shadow}\n\n## Is Obsidian note-taking secure?\n\nUsers overall believe Obsidian is safe to use. One user said you [maintain full control](https://becomeawritertoday.com/obsidian-review/) over your notes and it provides the ability to encrypt your vault.\n\n[This lawyer](https://deskoflawyer.com/secure-note-taking-apps/) maintains that Obsidian is the most secure note-taking app available. Others claim there are [no security threats](https://thebusinessblocks.com/is-obsidian-one-of-the-most-secure-and-best-notetaking-apps/) with Obsidian and users don’t have to worry about data being lost or transferred to third parties.\n\nBecause your files are stored on your own computer, your data stays safe and private, according to another [user](https://www.online-tech-tips.com/computer-tips/how-to-use-obsidian-as-a-personal-wiki-on-your-computer/).\n\n### Where to find more information on Obsidian markdown\n\nYou can find more information in this [Obsidian markdown guide](https://www.markdownguide.org/tools/obsidian/). An Obsidian roadmap is available [here](https://trello.com/b/Psqfqp7I/obsidian-roadmap). Of course, you can also go to the [Obsidian website](https://obsidian.md/).\n\nShare your Obsidian.md deployments in the comments.\n",[726],{"slug":2914,"featured":6,"template":678},"publishing-obsidian-notes-with-gitlab-pages","content:en-us:blog:publishing-obsidian-notes-with-gitlab-pages.yml","Publishing Obsidian Notes With Gitlab Pages","en-us/blog/publishing-obsidian-notes-with-gitlab-pages.yml","en-us/blog/publishing-obsidian-notes-with-gitlab-pages",{"_path":2920,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2921,"content":2927,"config":2933,"_id":2935,"_type":16,"title":2936,"_source":17,"_file":2937,"_stem":2938,"_extension":20},"/en-us/blog/installing-gitlab-on-raspberry-pi-64-bit-os",{"title":2922,"description":2923,"ogTitle":2922,"ogDescription":2923,"noIndex":6,"ogImage":2924,"ogUrl":2925,"ogSiteName":692,"ogType":693,"canonicalUrls":2925,"schema":2926},"Installing GitLab on Raspberry Pi 64-bit OS","A Raspberry Pi enthusiast tries to run GitLab on the new 64-bit OS...and here's what happened.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679433/Blog/Hero%20Images/anto-meneghini-gqytxsrctvw-unsplash.jpg","https://about.gitlab.com/blog/installing-gitlab-on-raspberry-pi-64-bit-os","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Installing GitLab on Raspberry Pi 64-bit OS\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-03-14\",\n      }",{"title":2922,"description":2923,"authors":2928,"heroImage":2924,"date":2929,"body":2930,"category":14,"tags":2931},[2558],"2022-03-14","\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. 
Please do not rely on this information for purchasing or planning purposes.\nAs with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\nRecently the 64-bit version of [Raspberry Pi OS](https://www.raspberrypi.com/software/) came out of a long-awaited beta, and as a Raspberry Pi enthusiast, I was eager to get my hands on it. While the 64-bit version isn't compatible with all Pi hardware, it's exciting to see the expansion of the ecosystem to allow for better access to RAM and software compatibility as 32-bit support becomes less common.\n\nBut speaking of software support - what about running GitLab on the new 64-bit OS? Did you know that GitLab already has support for [Raspberry Pi OS](/install/#raspberry-pi-os)? We even have documentation on [optimizing GitLab on a Raspberry Pi](https://docs.gitlab.com/omnibus/settings/rpi.html) for folks who want to run their self-hosted DevOps platform on simple hardware like the Pi.\n\nNow, the distribution team would want me to point out that official support for ARM64 is still [in the works](https://gitlab.com/groups/gitlab-org/-/epics/2370), but that didn't stop me from at least wanting to try to install GitLab on this exciting new platform. Remember that your mileage may vary - and don't use this in production as it isn't yet officially supported.\n\nBut that's never stopped me before, so I grabbed my Raspberry Pi 4, a new Micro SD card, and the updated [Raspberry Pi Imager](https://downloads.raspberrypi.org/imager/imager_latest.dmg) and got started.\n\n## Getting Started\n\nThe typical [install for GitLab on the Raspberry Pi](/install/#raspberry-pi-os) assumes you have the 32-bit version of `raspbian/buster` that has been standard for some time. Following those steps, I ran into an error with the install script.\n\nWhen running\n\n```bash\nsudo curl -sS https://packages.gitlab.com/install/repositories/gitlab/raspberry-pi2/script.deb.sh | sudo bash\n```\n\nit appeared to work, but when I tried to install GitLab I'd get this error:\n\n```bash\n$ sudo EXTERNAL_URL=\"https://gitpi.boleary.dev\" apt-get install gitlab-ce\n\nReading package lists... Done\nBuilding dependency tree... Done\nReading state information... 
Done\nPackage gitlab-ce is not available, but is referred to by another package.\nThis may mean that the package is missing, has been obsoleted, or\nis only available from another source\n\nE: Package 'gitlab-ce' has no installation candidate\n```\n\nThat's because this specific version of Raspberry Pi OS isn't supported yet - but since it is a fork of Debian Linux, I was able to work around that.\n\n## Manual Installation\n\nTo get started with a slightly modified installation path, I first got the package details and appropriate prerequisite libraries installed:\n\n```bash\ncurl -s https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.deb.sh | sudo bash\n\nsudo apt-get update\n\nsudo apt-get install debian-archive-keyring\n\nsudo apt-get install curl gnupg apt-transport-https\n\ncurl -L https://packages.gitlab.com/gitlab/gitlab-ce/gpgkey | sudo apt-key add -\n```\n\nThen I created a new sources list to point `apt` to for the installation with `sudo touch /etc/apt/sources.list.d/gitlab_gitlab-ce.list`.\n\nNext, I manually added the Debian Buster repositories to that sources list I just created by modifying `/etc/apt/sources.list.d/gitlab_gitlab-ce.list` to add:\n\n```\ndeb https://packages.gitlab.com/gitlab/gitlab-ce/debian/ buster main\ndeb-src https://packages.gitlab.com/gitlab/gitlab-ce/debian/ buster main\n```\n\n## Finishing Up\n\nFrom there, it was easy to install the 'standard' way, with apt-get handling the rest for me.\n\n```bash\nsudo apt-get update\n\nsudo EXTERNAL_URL=\"http://gitpi.boleary.dev\" apt-get install gitlab-ce\n```\n\n## Next Steps\n\nNow, those who love DNS will notice that I was pointing to a fully qualified domain name - but if you look up that address, it resolves to a private one.\n\n```bash\ndig gitpi.boleary.dev\n; \u003C\u003C>> DiG 9.10.6 \u003C\u003C>> gitpi.boleary.dev\n;; OPT PSEUDOSECTION:\n; EDNS: version: 0, flags:; udp: 512\n;; QUESTION SECTION:\n;gitpi.boleary.dev.\t\tIN\tA\n\n;; ANSWER SECTION:\ngitpi.boleary.dev.\t300\tIN\tA\t100.64.205.40\n```\n\nIsn't that interesting? What does it mean - can I access it from outside my house's network? 
And how will I get it to work with HTTPS on that private address?\n\nFor those answers, you'll have to stay tuned to my next article about running GitLab on the Raspberry Pi: Hosting a private GitLab server with Tailscale and Let's Encrypt.\n\nPhoto by \u003Ca href=\"https://unsplash.com/@antomeneghini?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Anto Meneghini\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/raspberries?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n  \n",[2932,232,704],"demo",{"slug":2934,"featured":6,"template":678},"installing-gitlab-on-raspberry-pi-64-bit-os","content:en-us:blog:installing-gitlab-on-raspberry-pi-64-bit-os.yml","Installing Gitlab On Raspberry Pi 64 Bit Os","en-us/blog/installing-gitlab-on-raspberry-pi-64-bit-os.yml","en-us/blog/installing-gitlab-on-raspberry-pi-64-bit-os",{"_path":2940,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2941,"content":2946,"config":2952,"_id":2954,"_type":16,"title":2955,"_source":17,"_file":2956,"_stem":2957,"_extension":20},"/en-us/blog/sharing-slis-across-departments",{"title":2942,"description":2943,"ogTitle":2942,"ogDescription":2943,"noIndex":6,"ogImage":2478,"ogUrl":2944,"ogSiteName":692,"ogType":693,"canonicalUrls":2944,"schema":2945},"How we share SLIs across engineering departments","The Scalability team engages with the Development department for collaborating on SLIs. The first post in this series explains how we made available information accessible for development groups.","https://about.gitlab.com/blog/sharing-slis-across-departments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we share SLIs across engineering departments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bob Van Landuyt\"}],\n        \"datePublished\": \"2022-03-10\",\n      }",{"title":2942,"description":2943,"authors":2947,"heroImage":2478,"date":2949,"body":2950,"category":14,"tags":2951},[2948],"Bob Van Landuyt","2022-03-10","\nAt GitLab, everyone can contribute to GitLab.com's availability. We\nmeasure availability using several Service Level Indicators (SLIs).\nBut it's not always easy to see how the features you're building are\nperforming. GitLab's features are divided amongst development groups,\nand every group has [their own dashboard](https://docs.gitlab.com/ee/development/stage_group_observability/index.html)\ndisplaying an availability score.\n\n![Stage group availability](https://about.gitlab.com/images/blogimages/2022-02-share-infrastructure-slis/2022-02-23-code_review_availability.png)\n\nWhen a group's availability goes below 99.95%, we work with the group\non figuring out why that is and how we can improve the performance or\nreliability of the features that caused their number to drop. The\n99.95% service level objective (SLO) is the same target the\ninfrastructure department has set for\n[GitLab.com availability](/handbook/engineering/infrastructure/performance-indicators/#gitlabcom-availability).\n\nBy providing specific data about how features perform on our production systems, it has become easier to recognize when it is important to prioritize performance and availability work.\n\n## Service availability on GitLab.com\n\nOur infrastructure is separated into multiple services, handling\ndifferent kinds of traffic but running the same monolithic Rails\napplication. Not all features have a similar usage pattern. 
For\nexample, on the service handling web requests for GitLab.com we see a\nlot more requests related to `code_review` or `team_planning` than we\ndo related to `source_code_management`. It's important that we\nlook at these in isolation as well as in aggregate at the service level.\n\nThere's nobody who knows better how to interpret these numbers in\nfeature aggregations than the people who build these features.\n\nThis number is sourced from the same SLIs that we use to monitor\nGitLab.com's availability. We calculate it by dividing the number of\nsuccessful measurements by the total number of measurements over the\npast 28 days. A measurement could be several things, most commonly a\nrequest handled by our Rails application or a background job.\n\n## Monitoring feature and service availability\n\nFor monitoring GitLab.com we have Grafana dashboards, generated using\n[Grafonnet](https://grafana.github.io/grafonnet-lib/), that show these\nsource metrics in several dimensions. For example, these are error\nrates of our monolithic Rails application, separated by feature:\n\n![Puma SLI by feature](https://about.gitlab.com/images/blogimages/2022-02-share-infrastructure-slis/2022-02-23-puma_sli_per_feature.png)\n\nWe also generate [multiwindow, multi-burn-rate alerts](https://sre.google/workbook/alerting-on-slos/#short_and_long_windows_for_alerting)\nas defined in Google's SRE workbook.\n\n![Puma SLI error rate and requests per second](https://about.gitlab.com/images/blogimages/2022-02-share-infrastructure-slis/2022-02-23-puma_sli.png)\n\nThe red lines represent alerting thresholds for a burn rate. The\nthin threshold means we'll alert if the SLI has spent more than 5%\nof its monthly error budget in the past 6 hours. The thicker\nthreshold means we'll alert when the SLI has not met its SLO for more than\n2% of measurements in the past hour.\n\nBecause both GitLab.com's availability number and the availability\nnumber for development groups are sourced from the same metrics, we\ncan provide similar alerts and graphs tailored to the\ndevelopment groups. Features with a relatively low amount of traffic would not easily show\nproblems in our bigger service aggregations. With this mechanism we can see those problems\nand put them on the radar of the teams building those features.
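\n\nAs a back-of-the-envelope illustration of the availability calculation described above (the numbers here are made up, and this is not GitLab's actual tooling):\n\n```python\n# 28-day availability: successful measurements divided by total measurements\nsuccessful = 9_994_000  # e.g., requests and background jobs that met their SLI\ntotal = 10_000_000\n\navailability = successful / total\nprint(f\"{availability:.4%}\")  # 99.9400% - below the 99.95% SLO, so this group needs attention\n```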
\n\n## Building and adoption\n\nIn upcoming posts, we will talk about how we built this tooling and how we worked with other teams to have it adopted into the product prioritization process.\n\n## Related content\n\n- [Our project to provide more detailed data on the stage group dashboards](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/664)\n- [Development documentation for how to change dashboard content](https://docs.gitlab.com/ee/development/stage_group_observability/index.html)\n",[1286,704,727,1347],{"slug":2953,"featured":6,"template":678},"sharing-slis-across-departments","content:en-us:blog:sharing-slis-across-departments.yml","Sharing Slis Across Departments","en-us/blog/sharing-slis-across-departments.yml","en-us/blog/sharing-slis-across-departments",{"_path":2959,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2960,"content":2966,"config":2972,"_id":2974,"_type":16,"title":2975,"_source":17,"_file":2976,"_stem":2977,"_extension":20},"/en-us/blog/efficient-pipelines",{"title":2961,"description":2962,"ogTitle":2961,"ogDescription":2962,"noIndex":6,"ogImage":2963,"ogUrl":2964,"ogSiteName":692,"ogType":693,"canonicalUrls":2964,"schema":2965},"Extract greater efficiency from your CI pipelines","Learn some techniques to find the balance between pipeline performance and resource utilization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667534/Blog/Hero%20Images/ci-pipeline.jpg","https://about.gitlab.com/blog/efficient-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Extract greater efficiency from your CI pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vlad Budica\"}],\n        \"datePublished\": \"2022-03-09\",\n      }",{"title":2961,"description":2962,"authors":2967,"heroImage":2963,"date":2969,"body":2970,"category":14,"tags":2971},[2968],"Vlad Budica","2022-03-09","\nWhen discussing efficiency, we typically need to balance two things: time and money. It's quite easy to optimize for just one of these parameters. However, that can be an oversimplification. Within some constraints, more resources (i.e., hardware and Runners) equal better performance. Yet, the exact opposite is true for other constraints. In this article, I will walk you through the process of finding the sweet spot in optimizing your GitLab CI pipeline. The principles that I'll cover work well for existing pipelines and also for new ones. Please note that this is subjective and the sweet spot might be very different for different users in different scenarios.\n\nAs we dig into the technical aspects, note that we are looking for an overall optimization of a pipeline, as opposed to just looking at a particular job. The reasoning behind this is that local optimizations might make the overall pipeline less efficient (we might generate bottlenecks).\n\nThe optimization recommendations below fall into two categories:\n- Execute fewer jobs and pipelines\n- Shorten the execution time of jobs and pipelines\n\nThe first step before modifying an aspect of a system is to understand it. Observe it in full. You need to know the overall pipeline architecture and also the current metrics for it. 
You need to know the total execution time, jobs that take a large amount of time to finish (any bottlenecks), and the total job workload (potential queue time) and Runner capacity – these last two go hand in hand. Finally, we can use [Directed Acyclic Graphs](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/), or DAGs, to visualize the pipeline and see the critical path (the minimum and maximum pipeline duration). We want to do this to minimize, as much as possible, the detrimental impact our changes can have on pipeline performance.\n\n## Execute fewer jobs and pipelines\n\nLet's look at ways of reducing the number of jobs and pipelines that get executed.\n\n### Apply rules\n\nThe first thing is to decide what needs to be executed and when. For example, with a website, if the only change performed is to the text on a page, then the resulting pipeline doesn't need to contain all the tests and checks that run when the web app itself changes.\n\nThis requires the use of the [rules keyword](https://docs.gitlab.com/ee/ci/yaml/#rules). Rules are evaluated when a pipeline is created (at each trigger), and evaluated in order until the first match. When a match is found, the job is either included in or excluded from the pipeline, depending on the configuration.\n\nThrough the rules keyword you can decide very precisely when a job should run or not. More information about use cases and configuration parameters can be found in the [doc page for rules](https://docs.gitlab.com/ee/ci/yaml/#rules).\n\n### Make jobs interruptible\n\nNow that jobs are only running when needed, you can focus on what happens when a new pipeline is triggered while a job is still running. This can lead to inefficiencies because we already know the job isn't running on the latest change performed on the target branch and that the results will get scrapped.\n\nThis is where the [interruptible keyword](https://docs.gitlab.com/ee/ci/yaml/#interruptible) comes in. It enables us to specify that a job can be interrupted when a newer one is triggered on the same branch. This should be coupled with the [automatic cancellation of redundant pipelines feature](https://docs.gitlab.com/ee/ci/pipelines/settings.html#auto-cancel-redundant-pipelines) so, in the end, jobs will be automatically canceled when newer pipelines are triggered.\n\nOne word of caution: use this mechanism only with jobs that are safe to stop, such as a build or a test job. Don't use this with your deployment jobs, as you're eventually going to end up with partial deployments. \n\nOne last point around executing fewer jobs and pipelines is to try to run non-essential scheduled pipelines as infrequently as possible. It's a balance that needs to be found between running the pipelines too often and not running them enough. Just go with the minimum acceptable under your company policy.\n\n## Shorten the execution time of jobs and pipelines\n\nThe next step is to find ways of making our jobs and pipelines execute in less time.\n\n### Execute jobs in parallel\n\nYou can [create DAGs in your pipelines](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/) to create relationships between jobs and ensure that jobs are executed as soon as their requirements (if any) are met, and that they aren't waiting unnecessarily for other jobs to finish. By using the [needs keyword](https://docs.gitlab.com/ee/ci/yaml/#needs) together with the [parallel keyword](https://docs.gitlab.com/ee/ci/yaml/#parallel), you can implement DAGs.
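\n\nA minimal sketch of what this can look like in `.gitlab-ci.yml` (the job names and commands are illustrative):\n\n```yaml\nbuild:\n  stage: build\n  script: [make build]\n\nlint:\n  stage: test\n  needs: []        # no dependencies: starts immediately, without waiting for the build stage\n  script: [make lint]\n\nunit-tests:\n  stage: test\n  needs: [build]   # starts as soon as build finishes, not when the whole stage does\n  parallel: 3      # fan the test job out into three concurrent jobs\n  script: [make test]\n```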
\n\nAnother useful mechanism to drive parallelism is [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html), which enable you to trigger concurrently running pipelines.\n\nThese offer great flexibility, and by using them you can execute your workloads in parallel as efficiently as possible. This can be a double-edged sword, though, as DAGs and [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) will increase the complexity of your pipelines, making them harder to analyze and understand. Within this very complex environment, you can run into unwanted side effects such as increased cost or even reduced efficiency.\n\nThe more jobs and pipelines you run in parallel, the more load will be put on your Runner infrastructure. If you do have an autoscaling mechanism and a large enough pool of resources, this will ensure no big queues are created and that things are running smoothly, but it will also lead to increased infrastructure costs. On the other hand, if you don't have autoscaling or if you have lower limits for the amount of resources available, the costs will be kept in check but your overall execution time will suffer because jobs will wait longer in queues.\n\n### Fail fast\n\nIt's desirable to detect errors and critical failures as soon as possible in your jobs and pipelines, and stop the execution. If you wait until toward the end of the pipeline to fail, the whole pipeline will waste hardware resources and increase your execution and waiting times. This is easier to implement when first designing a pipeline but can also be achieved by refactoring your existing ones.\n\nTesting usually takes a lot of time, which means we're waiting for the execution to finish before canceling the whole pipeline if the tests fail. What we want to do is move the jobs that run quicker earlier in the pipeline, thus getting feedback sooner. To configure this behavior, use the [allow_failure keyword](https://docs.gitlab.com/ee/ci/yaml/#allow_failure), allowing failure for non-critical jobs so that only the jobs that should fail the whole pipeline actually do.\n\n### Caching\n\nYou can also optimize the caching of your dependencies, which will improve the execution time. This can be very useful for jobs that fail often but for which the dependencies don't change that often.\n\nTo configure this in your jobs, you should use the [cache:when keyword](https://docs.gitlab.com/ee/ci/yaml/#cachewhen).\n\n### Optimize your container images\n\nUsing big images in your pipelines can slow things down significantly, as they take longer to be pulled. So the solution would be to use smaller images. Simple, right?\n\nWell, it's not always that easy to do, so you should start by analyzing your base image and your network speed, as these two will give an indication of how long it will take for the image to be pulled. The network connection we're interested in is the one between your Runner and your container registry.\n\nOnce we have this kind of information, we can decide to host the image in another container registry. If you have GitLab hosted in a public cloud, you should use the container image registry provided by that provider. An alternative that works no matter where GitLab is hosted is to use the internal GitLab container registry that's included with your service.
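\n\nFor example, each job can reference a small, purpose-built image hosted in the project's own registry. A sketch with hypothetical image paths:\n\n```yaml\nlint:\n  # slim image containing only the linter, pulled from the project's registry\n  image: registry.gitlab.com/my-group/my-project/lint:latest\n  script: [make lint]\n\nbuild:\n  # separate image with the full build toolchain, used only where needed\n  image: registry.gitlab.com/my-group/my-project/build-tools:latest\n  script: [make build]\n```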
\n\nAnother useful mechanism to drive parallelism is [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html), which enable you to trigger concurrently running pipelines.\n\nThese offer great flexibility, and by using them you can execute your workloads in parallel as efficiently as possible. This can be a double-edged sword though, as DAGs and [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) will increase the complexity of your pipelines, making them harder to analyze and understand. Within this very complex environment, you can run into unwanted side effects such as increased cost or even reduced efficiency.\n\nThe more jobs and pipelines you run in parallel, the more load is put on your Runner infrastructure. If you have an autoscaling mechanism and a large enough pool of resources, this will ensure no big queues are created and that things run smoothly, but it will also lead to increased infrastructure costs. On the other hand, if you don't have autoscaling or if you have lower limits on the amount of resources available, the costs will be kept in check but your overall execution time will suffer because jobs will wait longer in queues.\n\n### Fail fast\n\nIt's desirable to detect errors and critical failures as soon as possible in your jobs and pipelines, and stop the execution. If you wait until toward the end of the pipeline to fail, the whole pipeline wastes hardware resources and increases your execution and waiting times. This is easier to implement when first designing a pipeline, but it can also be achieved by refactoring existing ones.\n\nTesting usually takes a lot of time, which means we would otherwise wait for the test jobs to finish before the whole pipeline is canceled when they fail. What we want is to move the jobs that run quicker earlier in the pipeline, getting feedback sooner. To configure this behavior, use the [allow_failure keyword](https://docs.gitlab.com/ee/ci/yaml/#allow_failure) so that only jobs whose failure should fail the whole pipeline actually do so.\n\n### Caching\n\nYou can also optimize the caching of your dependencies, which will improve the execution time. This is especially useful for jobs that fail often but whose dependencies don't change that often.\n\nTo configure this in your jobs, you should use the [cache:when keyword](https://docs.gitlab.com/ee/ci/yaml/#cachewhen).\n\n### Optimize your container images\n\nUsing big images in your pipelines can slow things down significantly, as they take longer to pull. So the solution would be to use smaller images. Simple, right?\n\nWell, it's not always that easy, so you should start by analyzing your base image and your network speed, as these two give an indication of how long it will take for the image to be pulled. The network connection we're interested in is the one between your Runner and your container registry.\n\nOnce we have this kind of information, we can decide to host the image in another container registry. If you have GitLab hosted in a public cloud, you should use the container image registry provided by that provider. An alternative that works no matter where GitLab is hosted is to use the internal GitLab container registry that's included with your service.\n\nYou will get better results if, instead of using one master container image that holds everything you need to run the whole pipeline, you use multiple smaller images tailored to each job. It's faster to use custom container images that have all the tools you need pre-installed. This is also a safer option because you can validate the contents of the image more thoroughly.\n\nMore information about this topic can be found in [Docker's \"Best practices for writing Dockerfiles\"](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/).\n\n## Pipeline optimization is part science, part art\n\nYou should approach your pipeline optimization efforts through a continuous improvement lens. This process is part science, part art, as there aren't any quick solutions that you can apply to get your ideal result.\n\nI encourage you to test, document, and analyze the results of your pipeline optimization efforts. You try one thing, look for feedback from the metrics of your pipelines, document the results, the changes, and the new architecture (this can happen in GitLab issues and merge requests) so you can extract some learnings, and the cycle starts again.\n\nSmall gains will add up and provide significant improvements at a higher scale. As I mentioned before, look for overall improvements instead of local ones. Applying these principles to each project (pipeline templates make it easier to adopt them at scale), we can watch these improvements add up across projects.\n\nRead more: Learn how to [troubleshoot a GitLab pipeline failure](/blog/how-to-troubleshoot-a-gitlab-pipeline-failure/).\n",[894,832,937],{"slug":2973,"featured":6,"template":678},"efficient-pipelines","content:en-us:blog:efficient-pipelines.yml","Efficient Pipelines","en-us/blog/efficient-pipelines.yml","en-us/blog/efficient-pipelines",{"_path":2979,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2980,"content":2986,"config":2991,"_id":2993,"_type":16,"title":2994,"_source":17,"_file":2995,"_stem":2996,"_extension":20},"/en-us/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey",{"title":2981,"description":2982,"ogTitle":2981,"ogDescription":2982,"noIndex":6,"ogImage":2983,"ogUrl":2984,"ogSiteName":692,"ogType":693,"canonicalUrls":2984,"schema":2985},"How to protect GitLab-connected SSH key with Yubikey","Add a layer of security to SSH keys by restricting physical access to YubiKey.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667202/Blog/Hero%20Images/gitlabultimatesecurity.jpg","https://about.gitlab.com/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to protect GitLab-connected SSH key with Yubikey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-03-03\",\n      }",{"title":2981,"description":2982,"authors":2987,"heroImage":2983,"date":2988,"body":2989,"category":14,"tags":2990},[2558],"2022-03-03","\n[Two-factor authentication](https://docs.gitlab.com/ee/security/two_factor_authentication.html) is one of the best defenses we have as individuals for protecting our accounts and credentials. But not all 2FA methods are created equal. 
For example, SMS is vulnerable to [SIM-swapping](https://www.ic3.gov/Media/Y2022/PSA220208) attacks and thus doesn't always provide the extra security we would like.\n\nIdeally, everything I want to connect to would use 2FA with dedicated 2FA hardware. With GitLab 14.8, you can now use 2FA hardware to protect your SSH keys, as I explain below.\n\n## 2FA and SSH keys\n\nState-of-the-art 2FA uses a physical hardware device – often FIDO/U2F hardware – to verify your presence at the time of authentication. This provides two distinct factors as a means of authentication: something you know (your username and password, for instance) combined with something you have (the physical device). I have two [YubiKey](https://www.yubico.com/works-with-yubikey/catalog/gitlab/) devices that I use for this purpose – one that is always in a safe in my house and one that I generally keep with me and the computer I'm using to do work. And I secure everything I can using this method, including my GitLab account.\n\nThat does a great job of securing my access to GitLab, the application front end, and the ability to create and modify API keys. But there is another way to authenticate to a git server: SSH keys. In this case, there's only one factor of authentication, because the SSH key is on my computer. So you can imagine how excited I was to hear that GitLab added support for `ecdsa-sk` and `ed25519-sk` key types in [GitLab 14.8](/releases/2022/02/22/gitlab-14-8-released/#support-for-ecdsa-sk-and-ed25519-sk-ssh-keys).\n\n### What are `ecdsa-sk` and `ed25519-sk`?\n\nThese two new key types are close to the existing `ecdsa` (Elliptic Curve Digital Signature Algorithm) and `ed25519` (Edwards Curve Digital Signature Algorithm) keys already supported. But that `-sk` at the end adds the ability to verify the key with a FIDO/U2F device. \"SK\" here stands for \"security key\". [OpenSSH 8.2](https://www.openssh.com/txt/release-8.2) added this key type to the supported keys it can generate, interacting with the hardware device to authenticate user presence before allowing the key to be used.\n\nHowever, I still had a few things to do to be ready to use the new keys.\n\n## Updating OpenSSH \nMy daily driver computer is a 2021 iMac running macOS Big Sur version 11.6. When I went to generate this new key, I encountered a problem: apparently my version of SSH didn't support `-sk` keys!\n\nNow, your mileage may vary here, but I was able to update the version of SSH my Mac uses by default by first running `brew install openssh`, which successfully installed OpenSSH 8.8. But when I ran `ssh -V` it still showed version 8.1. So how could I get the system to use the newly installed OpenSSH instead?\n\nThe easiest way I could think of was to put the Homebrew version first in the $PATH variable. But where is that path? Luckily, I was able to find it (`/opt/homebrew/opt/openssh`) by running this command:\n\n`brew --prefix openssh`\n\nOnce I updated my $PATH variable to have that at the front (by prepending `/opt/homebrew/opt/openssh/bin` to `$PATH` in my shell profile), I got the desired outcome:\n\n```bash\n$  which ssh\n/opt/homebrew/opt/openssh/bin/ssh\n\n$ ssh -V\nOpenSSH_8.8p1, OpenSSL 1.1.1m  14 Dec 2021\n```\n\n## Generating the key\nNow that I was using the correct version of SSH, I was able to create my `ecdsa-sk` key by running: \n\n```bash\nssh-keygen -t ecdsa-sk -f ~/.ssh/id_ecdsa_sk\n```\n\nNow, the specific device I have only supports ECDSA and not EdDSA, which is why I went with `ecdsa-sk`.
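\n\nIf your hardware does support EdDSA, the command has the same shape with the other key type – a small sketch, assuming a compatible device:\n\n```bash\nssh-keygen -t ed25519-sk -f ~/.ssh/id_ed25519_sk\n```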
\n\nThere's also an option to have the key reside ON the device itself (if supported by your hardware) with the `-O resident` flag, like this:\n\n```bash\n$ ssh-keygen -t ecdsa-sk -O resident -f ~/.ssh/id_ecdsa_sk\n\nEnter PIN for authenticator:\nYou may need to touch your authenticator (again) to authorize key generation.\nEnter passphrase (empty for no passphrase):\nEnter same passphrase again:\nYour identification has been saved in /Users/brendan/.ssh/id_ecdsa_sk\nYour public key has been saved in /Users/brendan/.ssh/id_ecdsa_sk.pub\n```\n\nGenerating a resident key will make sharing this key with a new computer, if and when that happens, much easier. If, like me, you have a YubiKey, you can set the FIDO2 PIN using the [YubiKey Manager](https://www.yubico.com/support/download/yubikey-manager/) software.\n\n## Adding the key to GitLab\nNow that I had the complex parts covered, all that was left was to add the key to GitLab. I went to my [SSH settings](https://gitlab.com/-/profile/keys) on GitLab.com, (bravely) deleted my old SSH key, and added the `.pub` public part of my key to my profile.\n\nAnd it was that simple! Now every time I go to interact with GitLab.com, I'm prompted to confirm my presence by touching the YubiKey device attached to my computer:\n\n```bash\ngit clone git@gitlab.com:brendan/website.git\nCloning into 'website'...\nConfirm user presence for key ECDSA-SK SHA256:OZSZGwbnnbc...\n\ngit add .\ngit commit -m \"A new commit\"\ngit push\nConfirm user presence for key ECDSA-SK SHA256:OZSZGwbnnbc...\n```\n\nThat small but essential change gives me peace of mind: even if someone could somehow get my private SSH key, I would still be protected, because using it requires physical access to my YubiKey.\n\n",[894,1307,726],{"slug":2992,"featured":6,"template":678},"how-to-protect-gitlab-connected-ssh-key-with-yubikey","content:en-us:blog:how-to-protect-gitlab-connected-ssh-key-with-yubikey.yml","How To Protect Gitlab Connected Ssh Key With Yubikey","en-us/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey.yml","en-us/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey",{"_path":2998,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2999,"content":3005,"config":3011,"_id":3013,"_type":16,"title":3014,"_source":17,"_file":3015,"_stem":3016,"_extension":20},"/en-us/blog/bringing-ai-gitlab-repository",{"title":3000,"description":3001,"ogTitle":3000,"ogDescription":3001,"noIndex":6,"ogImage":3002,"ogUrl":3003,"ogSiteName":692,"ogType":693,"canonicalUrls":3003,"schema":3004},"GitLab and Tabnine: AI-powered code completion for GitLab repositories","Development teams can get a custom AI model based on their private code that enables knowledge sharing, reduced technical debt, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682249/Blog/Hero%20Images/blog_2757.png","https://about.gitlab.com/blog/bringing-ai-gitlab-repository","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Tabnine: AI-powered code completion for GitLab repositories\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brandon Jung\"}],\n        \"datePublished\": \"2022-03-02\",\n      }",{"title":3000,"description":3001,"authors":3006,"heroImage":3002,"date":3008,"body":3009,"category":14,"tags":3010},[3007],"Brandon Jung","2022-03-02","\n\nAs AI continues to become more ubiquitous throughout [every aspect of our
lives](https://www.tabnine.com/blog/is-ai-pair-programming-really-going-to-help-me/), it should come as little surprise that programming has picked it up as a tool to help make developers more productive. Tabnine is integrating with GitLab to bring Tabnine's AI-powered code completion technology to GitLab repositories to improve the accuracy and speed of code development.\n\nWe believe that increased development velocity improves the developer’s working experience, accelerates feature release cadence, and enables teams to respond faster to market opportunities. Users can now get a custom AI model based on their private code that enables: \n\n- Knowledge sharing\n- Reduced technical debt\n- Faster code reviews\n- Faster onboarding and time to value\n\nThe value of a custom model is about helping a specific team with a specific mission be more productive. A team comes in many forms, from the simplest [two-pizza team](https://docs.aws.amazon.com/whitepapers/latest/introduction-devops-aws/two-pizza-teams.html) to a large software company with hundreds of internal developers as well as thousands of external developers who contribute to a large shared [open source code base](/community/contribute/). What all these teams have in common is a shared interest in a common code base. For any digital company, this code base is one of its most important strategic assets, and anything that helps teams build it faster and more consistently deserves serious consideration.\n\nGitLab has a robust platform for hosting code for private teams, so it is natural that we wanted to make it easier for teams to bring their development models together. Developers can now automate the creation of a custom model based on their private code. The process is outlined below and is seamless for the user, as Tabnine will build, validate, and upload the private model for the whole team. New developers can now be added to the team and will immediately receive custom suggestions based on the codified best practices of the team. \n\nThis is the first step in ongoing work that Tabnine is doing to support developers together with GitLab, and we look forward to getting your feedback on how we can make it better for you individually and for your team.\n\nHere's how to get started: \n\n1. As a Tabnine for Teams user, log in to [AI Code Completions for Developers & Teams](https://app.tabnine.com/profile/) \n2. Navigate to the “Team AI” tab\n3. Connect to your GitLab repositories\n4. Tabnine will build, test, and upload your private team model\n5. Enjoy your personalized Tabnine AI assistant\n\n![Getting started](https://about.gitlab.com/images/blogimages/tabnine1.png){: .shadow}\n\nThe GitLab partnership represents Tabnine's latest step towards the goal of an end-to-end development platform supporting all developers regardless of working environments, coding languages, or IDEs. 
[Share your feedback with Tabnine](https://forms.gle/vCHK5QRoyR5xt6Jg8) on our AI-powered code completion technology.\n\n",[894,676,1084],{"slug":3012,"featured":6,"template":678},"bringing-ai-gitlab-repository","content:en-us:blog:bringing-ai-gitlab-repository.yml","Bringing Ai Gitlab Repository","en-us/blog/bringing-ai-gitlab-repository.yml","en-us/blog/bringing-ai-gitlab-repository",{"_path":3018,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3019,"content":3024,"config":3029,"_id":3031,"_type":16,"title":3032,"_source":17,"_file":3033,"_stem":3034,"_extension":20},"/en-us/blog/learn-python-with-pj-part-2",{"title":3020,"description":3021,"ogTitle":3020,"ogDescription":3021,"noIndex":6,"ogImage":2846,"ogUrl":3022,"ogSiteName":692,"ogType":693,"canonicalUrls":3022,"schema":3023},"Learn Python with Pj! Part 2 - Lists and loops","Follow along as our education evangelist Pj Metz learns about lists and loops in the second of this multipart series.","https://about.gitlab.com/blog/learn-python-with-pj-part-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn Python with Pj! Part 2 - Lists and loops\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-03-01\",\n      }",{"title":3020,"description":3021,"authors":3025,"heroImage":2846,"date":3026,"body":3027,"category":14,"tags":3028},[2851],"2022-03-01","\nWe’re back with another article about my journey to learn Python. Check out the [first article](/blog/learn-python-with-pj-part-1/) if you want to see what I’ve already learned. Today we’re talking about lists and loops, two important parts of all programming languages. Let’s check them out. \n\n## Lists\n\nLists are a way to store information that can be accessed later. They are similar to arrays in other languages. A list is a named collection of elements inside brackets that can be accessed by an index number. \n\n``` python\n#I will be using this list for all our examples, and, yes, these are some of my favorite musical acts pulled directly from my Spotify 2021 wrapped. \nfavorite_music = ['The Midnight', 'Night Tempo', 'St. Lucia'] \n```\n\nIn this list, each element can be accessed by an index number. Like many other languages, Python is zero-indexed, meaning the first element is at index 0. So favorite_music[0] is “The Midnight”, favorite_music[1] is “Night Tempo”, and so on. \n\nSomething interesting about lists in Python is that a negative 1 index number will give you the last element in the list. Negative 2 will give you the second to last, and so on. As far as I can tell, this isn’t possible in other languages: negative indices will return errors or `undefined` for arrays or lists elsewhere. I imagine a scenario where we’ve just added something to a list and need to access it immediately. We could use the negative index number to access the most recently added element. \n\nPython comes with several built-in methods to be used with lists. Some of them take the list as an argument; others are called on the list with a `.` so they can be used. These methods will change the list or return some kind of information about it. Below are a few I found useful, but a more complete explanation of available methods is [available here](https://docs.python.org/3/tutorial/datastructures.html). 
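\n\nBefore going through those, here's a quick sketch of the negative indexing described above, using the same list (this example is mine, not from the original lesson):\n\n```python\nfavorite_music = ['The Midnight', 'Night Tempo', 'St. Lucia']\n\n#negative indices count from the end of the list\nprint(favorite_music[-1]) #prints 'St. Lucia'\nprint(favorite_music[-2]) #prints 'Night Tempo'\n```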
\n\n### .pop()\n\nPop allows you to remove a specific element from a list and return it at the same time, meaning the result can be assigned to a variable. To specify the element, use the desired index number inside the parentheses to remove it. \n\n```python\nbest_synthwave = favorite_music.pop(0)\n\n#returns ‘['Night Tempo', 'St. Lucia']’\nprint(favorite_music)\n\n#returns 'The Midnight'\nprint(best_synthwave)\n```\n### .append() and .insert()\n\nAppend allows you to add an element to a list. Put the element in the parentheses; it is added to the end of the list. Insert allows you to say exactly where you would like the element placed. The first argument is the index where the element will be inserted (the elements after it shift over), and the second argument is the element to insert. \n\n```python\nfavorite_music.append('Turnstile') \n\n#This will print ['The Midnight', 'Night Tempo', 'St. Lucia', 'Turnstile']\nprint(favorite_music)\n\nfavorite_music.insert(1, 'Kendrick Lamar')\n#This will print ['The Midnight', 'Kendrick Lamar', 'Night Tempo', 'St. Lucia', 'Turnstile'] \n#Turnstile is still there since we appended it before. \nprint(favorite_music)\n```\n\n### len()\n\nLen gets the length of the object passed into it. This is important since you can know exactly how many elements are in a list, which is useful for control flow as well as loops. \n\n```python\nlength_of_music = len(favorite_music)\n\n#working with the original list will print “3”\nprint(length_of_music)\n```\nNotice that it prints how many elements are in the list, not the highest index number. I have to work to keep those two ideas separate. So there are three elements in the list, but the indices are [0], [1], and [2]. \n\n\n## Loops\n\nLoops work very much the same way they do in other languages, but like I’ve seen with the rest of Python, the syntax is more readable and the code just looks a bit cleaner. The two main ways to use loops in Python are `for` and `while`. \n\n### for\n\nFor is used when you want to iterate through each element in an object. The syntax creates a kind of one-time-use variable that is then used in the code block in a variety of ways. Let’s say you want to print each band from the favorite_music list from before. \n\n```python\nfor band in favorite_music:\n  print(band)\n```\n\nThis would print each band on its own line. If you call print() on favorite_music directly, it would print the whole list inside of brackets. You can perform logic inside of for loops to only act on certain items. Say you want to only print bands that have “night” in the name:\n\n```python\nfor band in favorite_music:\n    lower_case_band = band.lower()\n    if lower_case_band.__contains__('night'):\n      print(band)\n```\n\nNote: I put all the strings into lower case so we could match cases. Also, I found the contains method on the internet and the example had two underscores on either side. It made my code work, whereas without the underscores it did not. Like I said in the first article, I’m new here and don’t know why it did that.\n\n**EDIT March 7, 2022:** According to commenter \"Glen666\", the easier way to check if something is contained in another object is to use the `in` operator. It would look like this: \n\n```python\nfor band in favorite_music:\n  lower_case_band = band.lower()\n  if \"night\" in lower_case_band:\n    print(band)\n```\nThanks for catching this. I hadn't learned `in` yet so this makes it a bit easier!\n\n### while\n\nWhile creates a loop that runs as long as certain criteria are satisfied, usually a logic expression. If you want some code to run six times, you could use a while loop. \n\n```python\ni = 0\n#This prints the string below 6 times. \nwhile i \u003C 7:\n  print('The Midnight is my favorite band of all time.')\n  i += 1\n```\n\nThis is useful if you want code to run the whole time some circumstance is true, whether it’s a date, another process running, or anything of the sort. \n\n**EDIT March 7, 2022:** Thanks to user \"magicolf\" in the comments! They let me know that there's an error here where it prints 7 times instead of 6. Because `i` starts at `0` and the loop runs while `i \u003C 7`, it actually prints seven times. It's easy to make mistakes like this all the time, so I appreciate you letting me know, magicolf! 
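\n\nFor reference, a corrected version that really does print six times only needs the condition changed (again, my sketch, not part of the original lesson):\n\n```python\ni = 0\n#i goes 0, 1, 2, 3, 4, 5 – six passes in total\nwhile i \u003C 6:\n  print('The Midnight is my favorite band of all time.')\n  i += 1\n```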
\n\nLoops are some of my favorite things to write so far. It’s like a little puzzle to figure out when you need to iterate through a list or string to make something happen at a specific time. The hardest part about loops is getting used to the logic of it. Python made this easier for me in that loops feel very natural to read. On top of that, I’m getting used to the indentation that I felt was so strange last time. I’ve spent about 30 or so hours working on it so far, and it’s starting to feel very natural. Hopefully, I can keep this up as we move on to the [next learning modules](https://about.gitlab.com/blog/learn-python-with-pj-part-3/)! \n\n",[1508,894,2855],{"slug":3030,"featured":6,"template":678},"learn-python-with-pj-part-2","content:en-us:blog:learn-python-with-pj-part-2.yml","Learn Python With Pj Part 2","en-us/blog/learn-python-with-pj-part-2.yml","en-us/blog/learn-python-with-pj-part-2",{"_path":3036,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3037,"content":3043,"config":3050,"_id":3052,"_type":16,"title":3053,"_source":17,"_file":3054,"_stem":3055,"_extension":20},"/en-us/blog/parent-child-vs-multi-project-pipelines",{"title":3038,"description":3039,"ogTitle":3038,"ogDescription":3039,"noIndex":6,"ogImage":3040,"ogUrl":3041,"ogSiteName":692,"ogType":693,"canonicalUrls":3041,"schema":3042},"CI/CD patterns with parent-child and multi-project pipelines","Parent-child pipelines inherit a lot of the design from multi-project pipelines, but they also have differences that make them unique.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659961/Blog/Hero%20Images/parent-child-multi-project-pipelines-unsplash.jpg","https://about.gitlab.com/blog/parent-child-vs-multi-project-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Breaking down CI/CD complexity with parent-child and multi-project pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabio Pitino\"}],\n        \"datePublished\": \"2022-02-22\",\n      }",{"title":3044,"description":3039,"authors":3045,"heroImage":3040,"date":3047,"body":3048,"category":14,"tags":3049},"Breaking down CI/CD complexity with parent-child and multi-project pipelines",[3046],"Fabio Pitino","2022-02-22","\nSoftware requirements change over time. Customers request more features and the application needs to scale well\nto meet user demands. 
As software grows in size, so does its complexity, to the point where we might decide that it's\ntime to split the project up into smaller, cohesive components.\n\nAs we proceed to tackle this complexity, we want to ensure that our CI/CD pipelines continue to validate\nthat all the pieces work correctly together.\n\nThere are two typical paths to splitting up software projects:\n\n- **Isolating independent modules within the same repository**: For example, separating the UI from the backend,\n  the documentation from code, or extracting code into independent packages.\n- **Extracting code into a separate repository**: For example, extracting some generic logic into a library, or creating\n  independent microservices.\n\nWhen we pick a path for splitting up the project, we should also adapt the CI/CD pipeline to match.\n\nFor the first path, [GitLab CI/CD](/topics/ci-cd/) provides [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) as a feature that helps manage complexity while keeping it all in a monorepo.\n\nFor the second path, [multi-project pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html)\nare the glue that helps ensure multiple separate repositories work together.\n\nLet's look into how these two approaches differ, and understand how to best leverage them.\n\n## Parent-child pipelines\n\nIt can be challenging to maintain complex CI/CD pipeline configurations, especially when you need to coordinate many jobs that may relate\nto different components, while at the same time keeping the pipeline efficient.\n\nLet's imagine we have an app with all code in the same repository, but split into UI and backend components. A \"one-size-fits-all\" pipeline for this app would probably have all the jobs grouped into common stages that cover all the components. The default is to use `build`, `test`, and `deploy` stages.\nUnfortunately, this can be a source of inefficiency because the UI and backend represent two separate tracks of the pipeline.\nThey each have their own independent requirements and structure and likely don't depend on each other.\nThe UI might not need the `build` stage at all, but it might instead need a `system-test` stage with jobs that test the app end-to-end.\nSimilarly, the UI jobs from `system-test` might not need to wait for backend jobs to complete.\n\n[Parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) help here,\nenabling you to extract cohesive parts of the pipeline into child pipelines that run in isolation.\n\nWith parent-child pipelines we could break the configuration down into two separate\ntracks by having two separate jobs trigger child pipelines:\n\n- The `ui` job triggers a child pipeline that runs all the UI jobs.\n- The `backend` job triggers a separate child pipeline that runs all the backend jobs.\n\n```yaml\nui:\n  trigger:\n    include: ui/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [ui/*]\nbackend:\n  trigger:\n    include: backend/.gitlab-ci.yml\n    strategy: depend\n  rules:\n    - changes: [backend/*]\n```\n\nThe modifier `strategy: depend`, which is also available for multi-project pipelines, makes the trigger job reflect the status of the\ndownstream (child) pipeline and wait for it to complete. Without `strategy: depend`, the trigger job succeeds immediately after creating the downstream pipeline.\n\nNow the frontend and backend teams can manage their CI/CD configurations without impacting each other's pipelines. 
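\n\nFor illustration, the UI track's child configuration might look something like this – a hypothetical sketch, since the post doesn't show the contents of `ui/.gitlab-ci.yml`:\n\n```yaml\n# ui/.gitlab-ci.yml – runs as its own child pipeline\nstages:\n  - build\n  - system-test\n\nui-build:\n  stage: build\n  script: ./build_ui.sh\n\nui-system-test:\n  stage: system-test\n  script: ./test_ui_e2e.sh\n```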
\n\nIn addition to that, we can now explicitly visualize the two workflows.\n\n![example parent-child pipeline](https://about.gitlab.com/images/blogimages/2022-02-01-parent-child-vs-multi-project-pipelines/parent-child.png){: .shadow.medium.center}\n\nThe two pipelines run in isolation, so we can set variables or configuration in one without affecting the other. For example, we could use `rules:changes` or `workflow:rules` inside `backend/.gitlab-ci.yml`, but use something completely different in `ui/.gitlab-ci.yml`.\n\nChild pipelines run in the same context as the parent pipeline, which is the combination of project, Git ref, and commit SHA. Additionally, the child pipeline inherits some information from the parent pipeline, including Git push data like `before_sha`, `target_sha`, the related merge request, etc.\nHaving the same context ensures that the child pipeline can safely run as a sub-pipeline of the parent, while still running in complete isolation.\n\nA programming analogy to parent-child pipelines would be to break down long procedural code into smaller, single-purpose functions.\n\n## Multi-project pipelines\n\nIf our app spans different repositories, we should instead leverage [multi-project pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html). Each repository defines a pipeline that suits the project's needs. Then, these standalone and independent pipelines can be chained together to create what is essentially a much bigger pipeline that ensures all the projects are integrated correctly.\n\nThe possible topologies are endless, but let's explore a simple case: asking another project\nto run a service for our pipeline.\n\nThe app is divided into multiple repositories, each hosting an independent component of the app.\nWhen one of the components changes, that project's pipeline runs.\nIf the earlier jobs in the pipeline are successful, a final job triggers a pipeline on a different project, which is the project responsible for building, running smoke tests, and\ndeploying the whole app. 
If the component pipeline fails because of a bug, the process is interrupted and there is no\nneed to trigger a pipeline for the main app project.\n\nThe component project's pipeline:\n\n```yaml\nbuild:\n  stage: build\n  script: ./build_component.sh\n\ntest:\n  stage: test\n  script: ./test_component.sh\n\ndeploy:\n  stage: deploy\n  trigger:\n    project: myorg/app\n    strategy: depend\n```\n\nThe full app project's pipeline in the `myorg/app` project:\n\n```yaml\nbuild:\n  stage: build\n  script: ./build_app.sh  # build all components\n\nqa-test:\n  stage: test\n  script: ./qa_test.sh\n\nsmoke-test:\n  stage: test\n  script: ./smoke_test.sh\n\ndeploy:\n  stage: deploy\n  script: ./deploy_app.sh\n```\n\n![example multi-project pipeline](https://about.gitlab.com/images/blogimages/2022-02-01-parent-child-vs-multi-project-pipelines/multi-project.png){: .shadow.center}\n\nIn our example, the component pipeline (upstream) triggers a downstream multi-project pipeline to perform a service:\nverify the components work together, then deploy the whole app.\n\nA programming analogy to multi-project pipelines would be calling an external component or function to\neither receive a service (using `strategy:depend`) or to notify it that an event occurred (without `strategy:depend`).\n\n## Key differences between parent-child and multi-project pipelines\n\nAs seen above, the most obvious difference between parent-child and multi-project pipelines is the project\nwhere the pipelines run, but there are other differences to be aware of.\n\nContext:\n\n- Parent-child pipelines run in the same context: same project, ref, and commit SHA.\n- Multi-project pipelines run in completely separate contexts. The upstream multi-project pipeline can specify [a ref to use](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html), which determines what version of the pipeline to trigger.\n\nControl:\n\n- A parent pipeline _generates_ a child pipeline, and the parent can have a high degree of control over what the child pipeline\n  runs. The parent can even [dynamically generate configurations for child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html).\n- An upstream pipeline _triggers_ a downstream multi-project pipeline. The upstream (triggering) pipeline does not have much control over the structure of the downstream (triggered) pipeline.\n  The upstream project treats the downstream pipeline as a black box.\n  It can only choose the ref to use and pass some variables downstream.\n\nSide-effects:\n\n- The final status of a parent pipeline, like other normal pipelines, affects the status of the ref the pipeline runs against. For example, if a parent pipeline fails on the `main` branch, we say that `main` is broken.\n  The status of a ref is used in various scenarios, including [downloading artifacts](https://docs.gitlab.com/ee/api/job_artifacts.html#download-the-artifacts-archive) from the latest successful pipeline.\n\n  Child pipelines, on the other hand, run on behalf of the parent pipeline, and they don't directly affect the ref status. 
If triggered using `strategy: depend`, a child pipeline affects the status of the parent pipeline.\n  In turn, the parent pipeline can be configured to fail or succeed based on the `allow_failure:` configuration of the job triggering the child pipeline.\n- A multi-project downstream pipeline may affect the status of the upstream pipeline if triggered using `strategy: depend`,\n  but each downstream pipeline affects the status of the ref in the project where it runs.\n- Parent and child pipelines that are still running are all automatically canceled (if interruptible) when a new pipeline is created for the same ref.\n- Multi-project downstream pipelines are not automatically canceled when a new upstream pipeline runs for the same ref. The auto-cancellation feature only works within the same project.\n  Downstream multi-project pipelines are considered \"external logic\". They can only be auto-canceled when configured to be interruptible\n  and a new pipeline is triggered for the same ref on the downstream project (not the upstream project).\n\nVisibility:\n\n- Child pipelines are not directly visible in the pipelines index page because they are considered internal\n  sub-components of the parent pipeline, not standalone pipelines.\n  Child pipelines are discoverable only through their parent pipeline page.\n- Multi-project pipelines are standalone, normal pipelines that just happen to be triggered by another project's pipeline, and they are all visible in the pipeline index page.\n\n## Conclusions\n\nParent-child pipelines inherit a lot of the design from multi-project pipelines, but parent-child pipelines have differences that make them a unique type\nof pipeline relationship.\n\nSome of the parent-child pipeline work we at GitLab plan to focus on relates to:\n\n- Surfacing job reports generated in child pipelines in merge request widgets.\n- Cascading cancellation down to child pipelines.\n- Cascading removal down to child pipelines.\n- Passing variables across related pipelines.\n\nYou can check [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/336884) for planned future developments on parent-child and multi-project pipelines.\nLeave feedback or let us know how we can help.\n\nCover image by [Ravi Roshan](https://unsplash.com/@ravi_roshan_inc?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[832,937,749],{"slug":3051,"featured":6,"template":678},"parent-child-vs-multi-project-pipelines","content:en-us:blog:parent-child-vs-multi-project-pipelines.yml","Parent Child Vs Multi Project 
Pipelines","en-us/blog/parent-child-vs-multi-project-pipelines.yml","en-us/blog/parent-child-vs-multi-project-pipelines",{"_path":3057,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3058,"content":3063,"config":3068,"_id":3070,"_type":16,"title":3071,"_source":17,"_file":3072,"_stem":3073,"_extension":20},"/en-us/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod",{"title":3059,"description":3060,"ogTitle":3059,"ogDescription":3060,"noIndex":6,"ogImage":2807,"ogUrl":3061,"ogSiteName":692,"ogType":693,"canonicalUrls":3061,"schema":3062},"How to code, build, and deploy from an iPad using GitLab and Gitpod","Senior Developer Evangelist Brendan O'Leary tackles the challenge of doing DevOps from a tablet.","https://about.gitlab.com/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to code, build, and deploy from an iPad using GitLab and Gitpod\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-02-10\",\n      }",{"title":3059,"description":3060,"authors":3064,"heroImage":2807,"date":3065,"body":3066,"category":14,"tags":3067},[2558],"2022-02-10","\n\nAs a software engineer, it can be tough to go all-in on just using an iPad for your daily driver. So when Apple announced the M1 chip-based iPads, I, along with many techies, got excited to see if we'd finally get things like a proper terminal on the iPad. But that still isn't the use case that the iPad solves. I remained determined to be able to *code* from mine. So I hooked up my magic keyboard and fired up Gitpod to code and GitLab to build and deploy an app from scratch... all from my iPad.\n\n## Getting started\n\nLike any of [my projects](/blog/introducing-auto-breakfast-from-gitlab/), the first thing I needed was inspiration. I had promised my colleague [Pj](https://brendan.fyi/pj) for some time that I would review [his blog](https://brendan.fyi/pj-twitter-blog) on how to make a Twitter bot like all of the fantastic ones he built while breaking into tech. Combine the need to learn the Twitter API to provide an excellent review with my love of Elton John's music, and I had it: I'd make a Twitter bot that tweeted every morning at 4:00 am (as an homage to the line in “Someone Saved My Life Tonight”).\n\nArmed with my newfound inspiration, I ran to gitlab.com in Safari (on my iPad, obviously) and created a new, blank GitLab project.\n\n![ipad on desk](https://about.gitlab.com/images/blogimages/brendanipad1.png){: .shadow}\n\n## Coding on the iPad\n\nOnce I had the new project, getting started on Gitpod was as easy as clicking the \"Gitpod\" button on GitLab to open my repository in Gitpod.\n\nGitpod enables you to access an entire development environment from any browser. By default, you get a container with many development tools (Node, Ruby, OpenJDK, etc.). But you can also choose [your own container](https://www.gitpod.io/docs/config-docker) as a starting point with a .gitpod.yml… but we'll talk about that later.\n\nThe environment is presented to you as a VS Code interface – where you can open, edit, and add files just as you'd expect. 
You can also access the terminal just like you would in VS Code and install anything you might need to get your project running.\n\nIn this example, I decided to build the Twitter bot in Node.js, so I initialized a new Node project and installed the packages I'd need with:\n\n```bash\nnpm init -y\nnpm install express twit node-schedule dotenv\n```\n\n## Running your app\n\nOnce I had some code running – just the [Express sample app](https://expressjs.com/en/starter/hello-world.html) that says Hello World – running the app was just as easy as if I were running it on my laptop:\n\n```bash\nnpm run dev\n```\nNot only did that run my code – which connects to the Twitter API, waits until 4:00 a.m. (UTC), and then tweets to let everyone know it's 4:00 a.m. – but it also served a preview of my Express app:\n\n![Express app](https://about.gitlab.com/images/blogimages/brendanipad3.png){: .shadow}\n\nThat allows me to preview my [website for the app](https://brendan.fyi/4oclock) while I'm coding it. This is a massive benefit because it means I can have two tabs open on the iPad – one with Gitpod and my code and another with the website as I change it. Or I can even use split-screen on the iPad to have them side-by-side, like I might at my desk in my \"normal\" setup. And there's even a button to make the site available publicly so I could share it with my team and show them what I'm working on (as long as my Gitpod workspace is running).\n\nNow, when it comes to coding the rest of the Twitter bot, I used the previously mentioned [tutorial](https://brendan.fyi/pj-twitter-blog) from my colleague [Pj](https://brendan.fyi/pj). So I won't go into detail on the actual coding of the app – you can find the [code](https://gitlab.com/brendan-demo/4oclock), [website](https://brendan.fyi/4oclock), and [Twitter bot](https://twitter.com/DammitOclock) if you want to learn more about the app itself. But to deploy the website and the bot, I needed something else: [GitLab CI/CD](https://docs.gitlab.com/ee/ci/).\n\n## Deploying the app\n\nCombining GitLab CI/CD and GitLab.com's SaaS offering with Gitpod meant that I could not only code and preview the app from my iPad, but I could also get it deployed to Heroku (or any provider) from the couch. \n\nI created a `.gitlab-ci.yml` file in my project to get started. For deploying to Heroku:\n\n- I like to use a Ruby package called [dpl](https://github.com/travis-ci/dpl) from Travis CI because it makes deployment a simple one-line command. Alternatively, I could install the [Heroku CLI](https://devcenter.heroku.com/articles/heroku-cli) and deploy with that if I wanted to. \n\n- I added the `HEROKU_API_KEY` variable to my [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-a-project) so that I could authenticate with Heroku for the deployment. \n\n- I then set the `rules:` section to deploy only on commits to the main (default) branch, and I was ready to go! 
\n\nNow, every time I push code from Gitpod to GitLab, GitLab will start the build and deploy it to Heroku:\n\n```yaml\nimage: starefossen/ruby-node:2-10\n\nvariables:\n APP_NAME: four-oclock-in-the-morning\n\ndeploy:\n stage: deploy\n script:\n - gem install dpl -v 1.10.6\n - dpl --provider=heroku --app=$APP_NAME --api-key=$HEROKU_API_KEY\n rules:\n - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n```\n\n## Enabling collaboration\n\nThere are two other concepts that this pattern introduces that are worth discussion: the idea of one environment per change and enabling new collaborators to spin up a development environment in seconds.\n\nMost developers are used to having our setup just the way we like it – precisely the correct number of monitors, keys on our keyboard, and all of our favorite tools installed. However, that can lead to issues. We already know we should treat our servers like cattle, not pets, so why do we still treat our laptops like pets? While I love my MacBook and the stickers on it as much as the next person, I can get frustrated when setting up a new one and getting it back to the way I like it.\n\nIn addition, on many projects I've been on in the past, onboarding a new developer can take a lot of effort, including getting the correct libraries installed and ensuring they have access to all the right resources and environments. These things may seem trivial, but I've seen it take up to three days from senior engineers just to get another engineer up and running. All of that time is time that could be much better spent on writing code for the actual business.\n\nGitpod solves both of these issues with a simple YAML file: `.gitpod.yml`. This file allows you to specify:\n\n- What image to use as the base for the environment\n- Which other tools to install\n- What commands to run at startup, and even things like which VSCode extensions you should have in the environment\n\nAnd [lots of different settings](https://www.gitpod.io/docs/references/gitpod-yml) that you can find in the [Gitpod docs](https://www.gitpod.io/docs).\n\nSpecifying all of the tools needed lets you have short-lived environments that you can spin up for one task and then discard and get a fresh one for the next task. And it also saves time when onboarding new engineers by guaranteeing they have a running system within just a few seconds of opening the project. Best of all, it is all in a file that's in source control, so as things change or you make improvements to the development environment, all of your developers benefit from it immediately.  \n\nI added a simple [`.gitpod.yml`](https://gitlab.com/brendan-demo/4oclock/-/blob/main/.gitpod.yml) to run `npm run dev` to get started when you create a new environment. That simple example is great for a simple Node app or similar, but what about something more complex? Gitpod works for that, too. GitLab itself has a [`gitpod.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitpod.yml) that lets you get an entire working GitLab development environment – and all that entails – up and running quickly, without the need to install Postgres and Redis and all of the other dependencies GitLab has.\n\nThis makes contributing to GitLab easier than ever. Just go to the [GitLab repository](https://brendan.fyi/gitlab-repo) and click on that Gitpod button to get started. 
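\n\nAs a concrete (hypothetical) sketch of how little it takes, a minimal `.gitpod.yml` for a small Node app like this one could be:\n\n```yaml\nimage: gitpod/workspace-full   # base image with common toolchains pre-installed\n\ntasks:\n  - init: npm install          # runs once, when the workspace is first created\n    command: npm run dev       # runs on every workspace start\n\nports:\n  - port: 3000                 # assuming the Express default port\n    onOpen: open-preview       # open the running site in a preview pane\n```\n\nThe image, port number, and commands here are assumptions for illustration – check the Gitpod docs linked above for the options your project actually needs.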
\n\nI'd love to hear how it works for you!\n",[894,726,1347],{"slug":3069,"featured":6,"template":678},"how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod","content:en-us:blog:how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod.yml","How To Code Build And Deploy From An Ipad Using Gitlab And Gitpod","en-us/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod.yml","en-us/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod",{"_path":3075,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3076,"content":3081,"config":3087,"_id":3089,"_type":16,"title":3090,"_source":17,"_file":3091,"_stem":3092,"_extension":20},"/en-us/blog/gitops-with-gitlab-auto-devops",{"title":3077,"description":3078,"ogTitle":3077,"ogDescription":3078,"noIndex":6,"ogImage":2478,"ogUrl":3079,"ogSiteName":692,"ogType":693,"canonicalUrls":3079,"schema":3080},"Connecting Kubernetes clusters to GitLab with Auto DevOps","This is the 6th article in a series of tutorials on how to do GitOps with GitLab","https://about.gitlab.com/blog/gitops-with-gitlab-auto-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Connecting GitLab with a Kubernetes cluster - Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-02-08\",\n      }",{"title":3082,"description":3078,"authors":3083,"heroImage":2478,"date":3084,"body":3085,"category":14,"tags":3086},"GitOps with GitLab: Connecting GitLab with a Kubernetes cluster - Auto DevOps",[2014],"2022-02-08","\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nIn this article, we will look at how you can use Auto DevOps, with all its bells and whistles, to easily manage deployments.\n\n## Prerequisites\n\nThis article builds upon the previous tutorials in this series. We will assume that you have a Kubernetes cluster connected to GitLab using the GitLab Agent for Kubernetes, and that you understand how the CI/CD tunnel works.\n\nIf this is not the case, I recommend following the previous articles to reach a setup similar to the one we will start from today.\n\n## What is Auto DevOps\n\nAuto DevOps is GitLab's answer to the complexity of software application delivery. It is a set of opinionated templates that can be used \"as-is\" or to fast-track your own pipeline building. For some setups, it works out of the box, from testing through various security and compliance checks to canary deployments. Even if you have a less well-supported setup, you should be able to reuse some of its components, from security linting to deployment.\n\nYou can read more about the various [features built into Auto DevOps in our documentation](https://docs.gitlab.com/ee/topics/autodevops/).\n\n## The plan for building and deploying a minimal application\n\nThe plan for this article is to build and deploy a minimal application. The focus will be on showing how you can get started quickly, without any modifications to the Auto Deploy pipelines.\n\nThis setup will use the already familiar CI/CD tunnel. 
There will be a separate article that shows how to replace the \"Auto Deploy\" part of Auto DevOps with GitOps-style deployments.\n\nIn this article, we will deploy a simple hello world application. This is not a tutorial about Auto DevOps, so we will only focus on the setup needed to use it together with the GitLab Agent for Kubernetes.\n\nYou can see the final repository under https://gitlab.com/gitlab-examples/ops/gitops-demo/hello-world-service/.\n\n## How to build the application\n\nIn this section we will create our super simple hello world application and put a Dockerfile beside it.\n\n1. Start a new project.\n1. Add `src/main.py` with the following content:\n    ```python\n    # From https://gist.github.com/davidbgk/b10113c3779b8388e96e6d0c44e03a74\n    import http.server\n    import socketserver\n    from http import HTTPStatus\n\n    class Handler(http.server.SimpleHTTPRequestHandler):\n        def do_GET(self):\n            self.send_response(HTTPStatus.OK)\n            self.end_headers()\n            self.wfile.write(b'Hello world')\n\n    httpd = socketserver.TCPServer(('', 5000), Handler)\n    httpd.serve_forever()\n    ```\n1. Create the `Dockerfile` with:\n   ```\n   FROM python:3.9.10-slim-bullseye\n\n   WORKDIR /app\n\n   COPY ./src .\n\n   EXPOSE 5000\n\n   CMD [ \"python\", \"main.py\" ]\n   ```\n1. Commit the change to the repository.\n\n## How to set up Auto DevOps\n\n1. [Share the CI/CD tunnel](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html) with the hello-world project. Note that the Agent configuration project and the application project should be in the same project hierarchy, and the Agent configuration project needs to be higher in this hierarchy.\n    ```yaml\n    ci_access:\n      # This agent is accessible from CI jobs in projects in these groups\n      projects:\n        - id: \u003Cpath>/\u003Cto>/\u003Cyour>/\u003Cproject>\n    ```\n1. Find out the Kubernetes context name. The agent context name is `\u003Cnamespace>/\u003Cgroup>/\u003Cproject>:\u003Cagent-name>`. You can see the available contexts in CI with the following job:\n    ```yaml\n    contexts:\n      stage: .pre\n      image:\n        name: bitnami/kubectl:latest\n        entrypoint: [\"\"]\n      script:\n        - kubectl config get-contexts \n    ```\n1. Create your `.gitlab-ci.yml` file to have Auto DevOps working:\n    ```yaml\n    include:\n        template: Auto-DevOps.gitlab-ci.yml\n\n    variables:\n        # KUBE_INGRESS_BASE_DOMAIN is the application deployment domain and should be set as a variable at the group or project level.\n        KUBE_INGRESS_BASE_DOMAIN: 74.220.23.215.nip.io\n        KUBE_CONTEXT: \"gitlab-examples/ops/gitops-demo/k8s-agents:demo-agent\"\n        KUBE_NAMESPACE: \"demo-agent\"\n\n        # Feel free to enable any of these\n        TEST_DISABLED: \"true\"\n        CODE_QUALITY_DISABLED: \"true\"\n        LICENSE_MANAGEMENT_DISABLED: \"true\"\n        BROWSER_PERFORMANCE_DISABLED: \"true\"\n        LOAD_PERFORMANCE_DISABLED: \"true\"\n        SAST_DISABLED: \"true\"\n        SECRET_DETECTION_DISABLED: \"true\"\n        DEPENDENCY_SCANNING_DISABLED: \"true\"\n        CONTAINER_SCANNING_DISABLED: \"true\"\n        DAST_DISABLED: \"true\"\n        REVIEW_DISABLED: \"true\"\n        CODE_INTELLIGENCE_DISABLED: \"true\"\n        CLUSTER_IMAGE_SCANNING_DISABLED: \"true\"\n        POSTGRES_ENABLED: \"false\"\n    ```\n1. Commit the changes.\n\nAs you can see, I disabled many Auto DevOps functionalities in the above CI YAML. 
I did this for two reasons:\n\n1. Some of these features require a Premium or Ultimate license, or tests in the repo. I wanted to keep this tutorial \"stable\" for everyone.\n1. Every use case differs a little bit, and Auto DevOps allows a large number of customizations. I wanted to highlight this by showing you the most basic ones. Read more about [customizing Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/customize.html). If you would like [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) support, just remove the `REVIEW_DISABLED` line.\n\nThere are actually only three settings needed to get the Auto DevOps pipeline up and running:\n\n- The `KUBE_CONTEXT` specifies the context used for the connection; it's provided by the GitLab Agent for Kubernetes.\n- The `KUBE_NAMESPACE` specifies the Kubernetes namespace to target with the deployments. This namespace is used as we apply the Helm charts used under the hood.\n- The `KUBE_INGRESS_BASE_DOMAIN` sets up an Ingress and enables user-friendly access to the deployed service. \n\n## Recap\n\nA very common setup I see with GitLab customers is that the development team is responsible for writing the application code and packaging it into a Docker container. During this process, they take care of basic testing as well, but they are not familiar with all the security and compliance requirements or the deployment pipelines used within the company. The presented setup and the Auto DevOps suite of templates serve these teams. As you can see, the teams need minimal GitLab CI setup to run a complex pipeline that can take care of many of their requirements.\n\n## What's next\n\nIn the next article, I will show you how to deploy an application project with a GitOps-style workflow.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n",[535,1002,726],{"slug":3088,"featured":6,"template":678},"gitops-with-gitlab-auto-devops","content:en-us:blog:gitops-with-gitlab-auto-devops.yml","Gitops With Gitlab Auto Devops","en-us/blog/gitops-with-gitlab-auto-devops.yml","en-us/blog/gitops-with-gitlab-auto-devops",{"_path":3094,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3095,"content":3101,"config":3107,"_id":3109,"_type":16,"title":3110,"_source":17,"_file":3111,"_stem":3112,"_extension":20},"/en-us/blog/git-fetch-performance-2021-part-2",{"title":3096,"description":3097,"ogTitle":3096,"ogDescription":3097,"noIndex":6,"ogImage":3098,"ogUrl":3099,"ogSiteName":692,"ogType":693,"canonicalUrls":3099,"schema":3100},"Git fetch performance improvements in 2021, Part 2 ","Looking back at the server-side performance improvements we made in 2021 for Git fetch.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663383/Blog/Hero%20Images/tanuki-bg-full.png","https://about.gitlab.com/blog/git-fetch-performance-2021-part-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git fetch performance improvements in 2021, Part 2 \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Vosmaer\"}],\n        \"datePublished\": \"2022-02-07\",\n      }",{"title":3096,"description":3097,"authors":3102,"heroImage":3098,"date":3104,"body":3105,"category":14,"tags":3106},[3103],"Jacob Vosmaer","2022-02-07","\nIn [Part 1](/blog/git-fetch-performance/) of this two-part series, we looked at how much server-side Git fetch performance, especially for CI, has improved in GitLab in 2021. 
Now, we will discuss how we achieved this.\n\n## Recap of Part 1\n-   In December 2019, we set up custom CI fetch caching automation for\n   `gitlab-org/gitlab`, which we internally called \"the CI pre-clone\n   script\".\n-   In December 2020, we encountered some production incidents on GitLab.com,\n   which highlighted that the CI pre-clone script had become critical\n   infrastructure but, at the same time, it had not yet matured beyond\n   a custom one-off solution.\n-   Over the course of 2021, we built an alternative caching solution\n   for CI Git fetch traffic called the pack-objects cache. In Part 1,\n   we discussed a benchmark simulating CI fetch traffic which shows\n   that the pack-objects cache combined with other efficiency\n   improvements reduced GitLab server CPU consumption 9x compared to\n   the baseline of December 2020.\n\n## The pack-objects cache\n\nAs discussed in Part 1, what we realized through the\nproduction incidents in December 2020 was that the CI pre-clone script\nfor `gitlab-org/gitlab` had become a critical piece of infrastructure.\nAt the same time, it benefited only one Git repository on GitLab.com,\nand it was not very robust. It would be much better to have an\nintegrated solution that benefits all repositories. We achieved this\ngoal by building the [pack-objects cache](https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#pack-objects-cache).\n\nThe name \"pack-objects cache\" refers to `git pack-objects`, which is\nthe Git [subcommand](https://git-scm.com/docs/git-pack-objects) that\nimplements the [packfile](https://git-scm.com/book/en/v2/Git-Internals-Packfiles) compression algorithm. As this [Git commit message from Jeff King](https://gitlab.com/gitlab-org/gitlab-git/-/commit/20b20a22f8f7c1420e259c97ef790cb93091f475) explains, `git pack-objects` is a good candidate for a CI fetch cache.\n\n> You may want to insert a caching layer around\n> pack-objects; it is the most CPU- and memory-intensive\n> part of serving a fetch, and its output is a pure\n> function of its input, making it an ideal place to\n> consolidate identical requests.\n\nThe pack-objects cache is GitLab's take on this \"caching layer\". It\ndeduplicates identical Git fetch requests that arrive within a short\ntime window.\n\nAt a high level, when serving a fetch, we buffer the output of `git\npack-objects` into a temporary file. If an identical request comes in,\nwe serve it from the buffer file instead of creating a new `git\npack-objects` process. After 5 minutes, we delete the buffer file. If\nyou want to know more about how exactly the cache is implemented, you\ncan look at the implementation\n([1](https://gitlab.com/gitlab-org/gitaly/-/blob/v14.6.3/internal/gitaly/service/hook/pack_objects.go),\n[2](https://gitlab.com/gitlab-org/gitaly/-/tree/v14.6.3/internal/streamcache)).\n\n![Architecture diagram](https://about.gitlab.com/images/blogimages/git-fetch-2021/pack-objects-cache-architecture.jpg)\n\nBecause the amount of space used by the cache files is bounded roughly\nby the eviction window (5 minutes) multiplied by the maximum network bandwidth\nof the Gitaly server, we don't have to worry about the cache using a\nlot of storage. In fact, on GitLab.com, we store the cache files on the\nsame disks that hold the repository data. 
We leave a safety margin of\nfree space on these disks at all times anyway, and the cache fits in\nthat safety margin comfortably.\n\nSimilarly, we also don't notice the increased disk input/output\noperations per second (IOPS) used by the cache on GitLab.com. There\nare two reasons for this. First of all, whenever we _read_ data from\nthe cache, it is usually still in the Linux page cache, so it gets\nserved from RAM. The cache barely does any disk read I/O operations.\nSecond, although the cache does do _write_ operations, these fit\ncomfortably within the maximum sustained IOPS rate supported by the\nGoogle Compute Engine persistent disks we use.\n\nThis leads us to a disadvantage of the pack-objects cache, which is\nthat it really does write a lot of data to disk. On GitLab.com, we saw\nthe disk write throughput jump up by an order of magnitude. You can\nsee this in the graph below, which shows disk writes for a single\nGitaly server with a busy, large repository on it (the GitLab [company\nwebsite](https://gitlab.com/gitlab-com/www-gitlab-com)). You can\nclearly see the number of bytes written to disk per second jump up when we\nturned the cache on.\n\n![increased disk writes with cache enabled](https://about.gitlab.com/images/blogimages/git-fetch-2021/cache-disk-writes.jpg)\n\nThis increase in disk writes is not a problem for our infrastructure because we have the\nspare capacity, but we were not sure we could assume the same for all\nother GitLab installations in the world. Because of this, we decided\nto leave the pack-objects cache off by default.\n\nThis was a difficult decision because we think almost all GitLab\ninstallations would benefit from having this cache enabled. One of the\nreasons we are writing this blog post is to raise awareness that this\nfeature is available, so that self-managed GitLab administrators can\nopt in to using it.\n\nAgain, on the positive side, the cache did not introduce a new\npoint of failure on GitLab.com. If the `gitaly` service is running,\nand if the repository storage disk is available, then the cache is\navailable. There are no external dependencies. And if `gitaly` is not\nrunning, or the repository storage disk is unavailable, then the whole\nGitaly server is unavailable anyway.\n\nAnd finally, cache capacity grows naturally with the number of Gitaly\nservers. Because the cache is completely local to each Gitaly server,\nwe do not have to worry about whether the cache keeps working as we\ngrow GitLab.com.\n\nThe pack-objects cache was introduced in GitLab 13.11. In GitLab 14.5,\nwe made it a lot more efficient by optimizing its transport using Unix\nsockets\n([1](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/3758),\n[2](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/3759)). If\nyou want to [try out the pack-objects cache](https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#pack-objects-cache) on\nyour self-managed GitLab instance, we recommend that you upgrade to\nGitLab 14.5 or newer first.\n\n## Improved RPC transport for Git HTTP\n\nAfter we built the pack-objects cache, we were able to generate a much\nhigher volume of Git fetch responses on a single Gitaly server.\nHowever, we then found out that the RPC transport between the HTTP\nfront-end (GitLab Workhorse) and the Gitaly server became a\nbottleneck. 
We tried disabling the CI pre-clone script of\n`gitlab-org/gitlab` in April 2021 but we quickly had to turn it back\non because the increased volume of Git fetch data transfer was slowing\ndown the rest of Gitaly.\n\nThe fetch traffic was acting as a noisy neighbor to all the other\ntraffic on `gitlab-org/gitlab`. For each GitLab.com Gitaly server, we\nhave a request latency\n[SLI](https://sre.google/sre-book/service-level-objectives/). This is\na metric that observes request latencies for a selection of RPCs that\nwe expect to be fast, and it tracks how many requests for these RPCs\nare \"fast enough\". If the percentage of fast-enough requests drops\nbelow a certain threshold, we know we have a problem.\n\nWhen we disabled the pre-clone script, the network traffic to the\nGitaly server hosting `gitlab-org/gitlab` went up, as expected. What\nwent wrong was that the percentage of fast-enough requests started to\ndrop. This was not because the server had to serve up more data: The\nRPCs that serve the Git fetch data do not count towards the latency\nSLI.\n\nBelow you see two graphs from the day we tried disabling the CI\npre-clone script. First, see how the network traffic off of the Gitaly\nserver increased once we disabled the CI pre-clone script. This is\nbecause instead of pulling most of the data from object storage, and\nonly some of the data from Gitaly, the CI runners now started pulling\nall of the Git data they needed from Gitaly.\n\n![network peaks](https://about.gitlab.com/images/blogimages/git-fetch-2021/no-script-network-annotated.png)\n\nNow consider our Gitaly request latency SLI for this particular\nserver. For historical reasons, we call this \"Apdex\" in our dashboards.\nRecall that this SLI tracks the percentage of fast-enough requests from\na selection of Gitaly RPCs. The ideal number would be 100%. In the\ntime window where the CI pre-clone script was disabled, this graph\nspent more time below 99%, and it even dipped below 96% several times.\n\n![latency drops](https://about.gitlab.com/images/blogimages/git-fetch-2021/no-script-latency-annotated.png)\n\nEven though we could not explain what was going on, the latency SLI dips\nwere clear evidence that disabling the CI pre-clone script slowed down\nunrelated requests to this Gitaly server, to a point which is\nunacceptable. This was a setback for our plan to replace the CI pre-clone script.\n\nBecause we did not want to just give up, we set aside some time to try\nand understand what the bottleneck was, and if it could be\ncircumvented. The bad news is that we did not come up with a\nsatisfactory answer about what the bottleneck is. But the good news is\nthat we were able to circumvent it.\n\nBy building a simplified [prototype alternate RPC\ntransport](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1046),\nwe were able to find out that with the pack-objects cache, the\nhardware we run on and Git itself were able to serve up much more\ntraffic than we were able to get out of GitLab. We [never got to the\nbottom](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1024)\nof what was causing all the overhead but a likely suspect is the fact\nthat gRPC-Go allocates memory for each message it sends, and with Git\nfetch traffic we send a lot of messages. Gitaly was spending a lot of\ntime doing garbage collection.\n\nWe then had to decide how to improve the situation. 
Because we were\nuncertain if we could fix the apparent bottleneck in gRPC, and because\nwe were certain that we could go faster by not sending the Git fetch data\nthrough gRPC in the first place, we chose to do the latter. We created\nmodified versions of the RPCs that carry the bulk of the Git fetch\ndata. On the surface, the new versions are still gRPC methods. But\nduring a call, each will establish a side channel, and use that for\nthe bulk data transfer.\n\n![side channel diagram](https://about.gitlab.com/images/blogimages/git-fetch-2021/sidechannel.png)\n\nThis way we avoided making major changes to the structure of Gitaly:\nit is still a gRPC server application. Logging, metrics,\nauthentication, and other middleware work as normal on the optimized\nRPCs. But most of the data transfer happens on either Unix sockets (for localhost RPC calls) or [Yamux streams](https://github.com/hashicorp/yamux/) (for the regular RPC calls).\n\nBecause we have 6x more Git HTTP traffic than Git SSH traffic on\nGitLab.com, we decided to initially only optimize the transport for\nGit HTTP traffic. We are still working on [doing the same for Git\nSSH](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/652) because, even though Git HTTP efficiency is more important for\nGitLab.com than that of Git SSH, we know that for some self-managed\nGitLab instances it is the other way around.\n\nThe new server-side RPC transport for Git HTTP was released in GitLab\n14.5. There is no configuration required for this improved transport.\nRegardless of whether you use the pack-objects cache on your GitLab\ninstance, Gitaly, Workhorse, and Praefect all use less CPU to handle\nGit HTTP fetch requests now.\n\nThe payoff for this work came in October 2021 when we disabled the CI\npre-clone script for `gitlab-org/gitlab`, which did not cause any\nnoisy neighbor problems this time. We have had no issues since then\nserving the Git fetch traffic for that project.\n\n## Improvements to Git itself\n\nAside from the pack-objects cache and the new RPC transport between\nWorkhorse and Gitaly, we also saw some improvements because of changes\nin Git itself. We discovered a few inefficiencies which we\nreported to the Git mailing list and helped get fixed.\n\nOur main repository `gitlab-org/gitlab` has hundreds of thousands of [Git\nreferences](https://git-scm.com/book/en/v2/Git-Internals-Git-References). Looking at CPU profiles, we [noticed](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/400) that a lot of Git\nfetch time was spent on the server iterating over these references.\nThese references were not even being sent back to the client; Git was\njust scanning through all of them on the server twice for each CI job.\n\nIn both cases, the problem could be fixed by doing a scan over a\nsubset instead of a scan across all references. These two problems got fixed\n([1](https://gitlab.com/gitlab-org/gitlab-git/-/commit/b3970c702cb0acc0551d88a5f34ad4ad2e2a6d39), [2](https://gitlab.com/gitlab-org/gitlab-git/-/commit/be18153b975844f8792b03e337f1a4c86fe87531)) in Git 2.31.0, released in March 2021.\n\nLater on, we found a different problem, also in the reference-related\nworkload of Git fetch. As part of the fetch protocol, the server sends\na list of references to the client so that the client can update its\nlocal branches etc. It turned out that for each reference, Git was\ndoing 1 or 2 `write` system calls on the server. 
This led to [a lot of\noverhead](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1257), and this was made worse by our old RPC transport, which could\nend up sending 1 RPC message per advertised Git reference.\n\nThis problem got fixed in Git itself by changing the functions that\nwrite the references to [use buffered\nIO](https://gitlab.com/gitlab-org/gitlab-git/-/commit/70afef5cdf29b5159f18df1b93722055f78740f8).\nThis change landed in Git 2.34.0, released in November 2021. Ahead of\nthat, it got shipped in GitLab 14.4 as a custom Git patch.\n\nFinally, we discovered that increasing the copy buffer size used by\n`git upload-pack` to relay `git pack-objects` output made both `git\nupload-pack` and [every link in the chain after\nit](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/4224) more\nefficient. This got fixed in Git by [increasing the buffer\nsize](https://gitlab.com/gitlab-org/gitlab-git/-/commit/55a9651d26a6b88c68445e7d6c9f511d1207cbd8).\nThis change is part of Git 2.35.0 and is included in GitLab 14.7, both\nof which were released in January 2022.\n\n## Summary\n\nIn Part 1, we showed that GitLab server performance when serving CI Git fetch traffic has improved a lot in 2021. In this post, we explained that the improvements are due to:\n\n- The pack-objects cache\n- A more efficient Git data transport between server-side GitLab components\n- Efficiency improvements in Git itself\n\n## Thanks\n\nMany people have contributed to the work described in this blog post.\nI would like to specifically thank Quang-Minh Nguyen and Sean McGivern\nfrom the Scalability team, and Patrick Steinhardt and Sami Hiltunen\nfrom the Gitaly team.\n\n## Related content\n\n- Improvements to the client-side performance of `git fetch` (although GitLab is a server application, it sometimes acts as a Git client): [mirror fetches](https://gitlab.com/gitlab-org/git/-/issues/95), [fetches into repositories with many references](https://gitlab.com/gitlab-org/git/-/issues/94)\n- Improvements to server-side Git push performance: [consistency check improvements](https://gitlab.com/gitlab-org/git/-/issues/92)\n",[702,1286,704],{"slug":3108,"featured":6,"template":678},"git-fetch-performance-2021-part-2","content:en-us:blog:git-fetch-performance-2021-part-2.yml","Git Fetch Performance 2021 Part 2","en-us/blog/git-fetch-performance-2021-part-2.yml","en-us/blog/git-fetch-performance-2021-part-2",{"_path":3114,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3115,"content":3121,"config":3127,"_id":3129,"_type":16,"title":3130,"_source":17,"_file":3131,"_stem":3132,"_extension":20},"/en-us/blog/cicd-tunnel-impersonation",{"title":3116,"description":3117,"ogTitle":3116,"ogDescription":3117,"noIndex":6,"ogImage":3118,"ogUrl":3119,"ogSiteName":692,"ogType":693,"canonicalUrls":3119,"schema":3120},"Fine-grained permissions with impersonation in CI/CD tunnel","Learn how to use fine-grained permissions via generic impersonation in CI/CD Tunnel","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667435/Blog/Hero%20Images/tunnel.jpg","https://about.gitlab.com/blog/cicd-tunnel-impersonation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use fine-grained permissions via generic impersonation in CI/CD Tunnel\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2022-02-01\",\n      
}",{"title":3122,"description":3117,"authors":3123,"heroImage":3118,"date":3124,"body":3125,"category":14,"tags":3126},"How to use fine-grained permissions via generic impersonation in CI/CD Tunnel",[1101],"2022-02-01","\nThe [CI/CD Tunnel](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html), which leverages the [GitLab Agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/), enables users to access Kubernetes clusters from GitLab CI/CD jobs. In this blog post, we review how you can securely access your clusters from your CI/CD pipelines by using generic impersonation. In addition, we will briefly cover the activity list of the GitLab Agent for Kubernetes, a capability recently introduced by GitLab, that can help you detect and troubleshoot faulty events.\n\n## Using impersonation with your CI/CD tunnel\n\nThe CI/CD Tunnel leverages the GitLab Agent for Kubernetes, which permits the secure connectivity between GitLab and your Kubernetes cluster without the need to expose your cluster to the internet and outside your firewall. The CI/CD Tunnel allows you to connect to your Kubernetes cluster from your CI/CD jobs/pipelines.\n\nBy default, the CI/CD Tunnel inherits all the permissions from the service account used to install the Agent in the cluster. However, fine-grained permissions can be used in conjunction with the CI/CD Tunnel to restrict and manage access to your cluster resources.\n\nFine-grained permissions control with the CI/CD tunnel via impersonation:\n\n- Allows you to leverage your K8s authorization capabilities to limit the permissions of what can be done with the CI/CD tunnel on your running cluster\n\n- Lowers the risk of providing unlimited access to your K8s cluster with the CI/CD tunnel\n\n- Segments fine-grained permissions with the CI/CD tunnel at the project or group level\n\n- Controls permissions with the CI/CD tunnel at the username or service account\n\nTo restrict access to your cluster, you can use impersonation. To specify impersonations, use the access_as attribute in your Agent's configuration file and use Kubernetes RBAC rules to manage impersonated account permissions.\n\nYou can impersonate:\n- The Agent itself (default)\n= The CI job that accesses the cluster\n- A specific user or system account defined within the cluster\n\n## Steps to exercise impersonation with the CI/CD Tunnel\n\nLet's go through the steps on how you can exercise impersonation with the CI/CD Tunnel.\n\n### Creating your Kubernetes cluster\n\nIn order to exercise the capabilities described above, we need a Kubernetes cluster. Although, you can use any Kubernetes distribution, for this example, we create a GKE Standard Kubernetes cluster and name it \"csaavedra-ga4k-cluster\". We select the zone and version 1.21 of Kubernetes and ensure that our cluster will have three nodes. We leave the security and metadata screens with their defaulted values and click on the create button:\n\n![Creating a GKE cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/0-gke-creation.png){: .shadow.medium.center.wrap-text}\nCreating a GKE cluster\n{: .note.text-center}\n\n### Sample projects to be used\n\nLet's proceed now to this [top-level group](https://gitlab.com/tech-marketing/sandbox/gl-14-5-cs-demos), which contains three projects, which we will use to show impersonation with the CI/CD tunnel. You can do this at the project or group level. 
In this example, we will show setting impersonation at the project level:\n\n![Project structure in GitLab](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/1-project-struct.png){: .shadow.medium.center.wrap-text}\nProject structure in GitLab\n{: .note.text-center}\n\nProject \"ga4k\" will configure the GitLab Agent for Kubernetes and also set impersonations with the CI/CD tunnel. Project \"sample-application\" will use the CI/CD tunnel, managed by the agent, to connect to the Kubernetes cluster and execute a pipeline using different impersonations. Project \"cluster-management\" will also use the CI/CD tunnel to connect to the cluster and install the Ingress application on it.\n\nNot only does the CI/CD tunnel streamline the deployment, management, and monitoring of Kubernetes-native applications, but it also does so securely and safely by using impersonations that leverage your Kubernetes cluster's RBAC rules.\n\nProject \"ga4k\" contains and manages the configuration for the GitLab Agent for K8s called \"csaavedra-agentk\". Looking at its \"config.yaml\" file, we see that the agent points to itself for manifest projects, but most importantly, it provides CI/CD tunnel access to two projects: \"sample-application\" and \"cluster-management\". This means that these two projects' CI/CD pipelines will have access to the K8s cluster that the agent is securely connected to:\n\n![The GitLab Agent for K8s configuration](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/2-agent-config.png){: .shadow.medium.center.wrap-text}\nThe GitLab Agent for K8s configuration\n{: .note.text-center}\n\nProject \"sample-application\" has a pipeline, which we will later execute under different impersonations. And project \"cluster-management\" has a pipeline that will install only the Ingress application on the Kubernetes cluster, as configured in its helmfile.yaml file:\n\n![Deployable applications in cluster-management project](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/3-cluster-mgmt-helmfile.png){: .shadow.medium.center.wrap-text}\nDeployable applications in cluster-management project\n{: .note.text-center}\n\n### Connecting the Agent to your Kubernetes cluster\n\nLet's head back to project \"ga4k\" and connect to the Kubernetes cluster via the agent. We select agent \"csaavedra-agentk\" to register with GitLab:\n\n![List of defined agents](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/4-agents-popdown.png){: .shadow.medium.center.wrap-text}\nList of defined agents\n{: .note.text-center}\n\nThis step generates a token that we can use to install the agent on the cluster. We copy the Docker command to our local desktop for later use. 
Notice that the command includes the generated token, which you can also copy:\n\n![Docker command to deploy agent to your K8s cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/5-docker-cmd.png){: .shadow.medium.center.wrap-text}\nDocker command to deploy agent to your K8s cluster\n{: .note.text-center}\n\nFrom a local command window, we ensure that our connectivity parameters to GCP are correct:\n\n![Checking your GCP connectivity parameters](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/6-gcp-connectivity.png){: .shadow.medium.center.wrap-text}\nChecking your GCP connectivity parameters\n{: .note.text-center}\n\nWe then add the credentials to our kubeconfig file to connect to our newly created Kubernetes cluster \"csaavedra-ga4k-cluster\" and verify that our context is set to it:\n\n![Adding your cluster credentials to your kubeconfig](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/7-adding-creds.png){: .shadow.medium.center.wrap-text}\nAdding the credentials of your cluster to your kubeconfig\n{: .note.text-center}\n\nOnce this is done, we can list all the pods that are up and running on the cluster by entering `kubectl get pods --all-namespaces`:\n\n![Listing the pods in your running cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/8-listing-pods.png){: .shadow.medium.center.wrap-text}\nListing the pods in your running cluster\n{: .note.text-center}\n\nFinally, we paste the Docker command that will install the GitLab Agent for Kubernetes to this cluster, making sure that its namespace is \"ga4k-agent\":\n\n![Deploying the agent to your K8s cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/9-pasted-docker-cmd.png){: .shadow.medium.center.wrap-text}\nDeploying the agent to your K8s cluster\n{: .note.text-center}\n\nWe list the pods one more time to check that the agent pod is up and running on the cluster:\n\n![Agent up and running on your K8s cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/10-agent-up.png){: .shadow.medium.center.wrap-text}\nAgent up and running on your K8s cluster\n{: .note.text-center}\n\nThe screen will refresh and show our Kubernetes cluster connected via the agent:\n\n![Agent connected to your K8s cluster](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/11-agent-connected.png){: .shadow.large.center.wrap-text}\nAgent connected to your K8s cluster\n{: .note.text-center}\n\n### The Agent's Activity Information page\n\nClicking on the agent name takes us to the Agent's Activity Information page, which lists agent events in real time. This information can help monitor your cluster's activity and detect and troubleshoot faulty events from your cluster. Connection and token information is currently listed, with more events coming in future releases:\n\n![Agent activity information page](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/12-agent-activity.png){: .shadow.small.center.wrap-text}\nAgent activity information page\n{: .note.text-center}\n\n### Deploying Ingress to your Kubernetes cluster using default impersonation\n\nBy default, the CI/CD Tunnel inherits all the permissions from the service account used to install the agent in the cluster. Per the agent's configuration, the CI/CD pipelines of the \"cluster-management\" project will have access to the K8s cluster that the agent is securely connected to. 
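\n\nFor reference, the part of the agent's \"config.yaml\" that grants this access looks roughly like the following sketch (the project paths are abbreviated placeholders). Because there is no \"access_as\" entry, the default applies and CI jobs act with the agent's own service account:\n\n```yaml\nci_access:\n  projects:\n    - id: path/to/sample-application\n    - id: path/to/cluster-management\n    # no access_as entries, so the agent's service account permissions apply\n```\n\n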
Let's leverage this connectivity to deploy the Ingress application to the Kubernetes cluster from project \"cluster-management\". Let's make a small update to the project pipeline to launch it. Once the pipeline launches, we navigate to its detail view to track its completion:\n\n![Project \"cluster-management\" pipeline completed](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/13-cluster-mgmt-pipeline.png){: .shadow.small.center.wrap-text}\nProject \"cluster-management\" pipeline completed\n{: .note.text-center}\n\nand check the log of its **apply** job to verify that it switched to the agent's context and successfully ran all the installation steps:\n\n![Ingress deployed to your cluster via CI/CD Tunnel using default impersonation](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/14-apply-job-log.png){: .shadow.medium.center.wrap-text}\nIngress deployed to your cluster via CI/CD Tunnel using default impersonation\n{: .note.text-center}\n\nFor further verification, we list the pods in the cluster and check that the ingress pods are up and running:\n\n![Ingress pods up and running](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/15-ingress-pods-up.png){: .shadow.medium.center.wrap-text}\nIngress pods up and running on your cluster\n{: .note.text-center}\n\n### Start trailing the agent's log file to watch updates\n\nBefore we begin the impersonation use cases, let's start trailing the agent's log file from a command window:\n\n![Trailing agent log from the command line](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/16-trail-agent-log.png){: .shadow.medium.center.wrap-text}\nTrailing agent log from the command line\n{: .note.text-center}\n\nLet's also increase its logging level to debug:\n\n![Increasing the agent log level to debug](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/17-agent-logging-level.png){: .shadow.medium.center.wrap-text}\nIncreasing the agent log level to debug\n{: .note.text-center}\n\n### Running impersonation using access_as:ci_job\n\nLet's now impersonate the CI job that accesses the cluster. For this, we modify the agent's configuration and add the \"access_as\" attribute with the \"ci_job\" tag under it:\n\n![Impersonating the CI job](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/18-ci-job-impersonation.png){: .shadow.medium.center.wrap-text}\nImpersonating the CI job\n{: .note.text-center}\n\nAs we save the updated configuration, we verify in the log output that the update has taken place in the running agent:\n\n![Agent updated with CI job impersonation](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/19-agent-conf-updated.png){: .shadow.large.center.wrap-text}\nAgent updated with CI job impersonation\n{: .note.text-center}\n\nNotice that the pipeline of the \"sample-application\" project has a test stage and a test job. 
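\n\nIn sketch form, such a job could look roughly like this (the context path, image tag, and kubectl commands are illustrative placeholders rather than the project's exact contents):\n\n```yaml\ntest:\n  stage: test\n  variables:\n    KUBE_CONTEXT: \"path/to/ga4k:csaavedra-agentk\" # \u003Cagent project path>:\u003Cagent name>\n  image:\n    name: bitnami/kubectl:1.21 # match your cluster's Kubernetes version\n    entrypoint: [\"\"]\n  script:\n    - kubectl config use-context \"$KUBE_CONTEXT\"\n    - kubectl get namespaces\n```\n\n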
It sets the variable KUBE_CONTEXT first, loads an image with the version of kubectl that matches the version of the K8s cluster, and executes two kubectl commands that access the remote cluster via the agent:\n\n![Project \"sample-application\" pipeline](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/20-sample-application-pipeline.png){: .shadow.medium.center.wrap-text}\nProject \"sample-application\" pipeline\n{: .note.text-center}\n\nWe manually execute the pipeline of the \"sample-application\" project and verify in the job log output that the context switch was successful and that the kubectl commands executed correctly:\n\n![Job log output with CI impersonation](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/21-ci-impersonation-job-log.png){: .shadow.medium.center.wrap-text}\nJob log output with CI impersonation\n{: .note.text-center}\n\n### Running impersonation using access_as:impersonate:username\n\nThe last use case is the impersonation of a specific user or system account defined within the cluster. I have pre-created a service account called \"jane\" on the Kubernetes cluster under the \"default\" namespace. And \"jane\" has been given the permission to do a \"get\", \"list\", and \"watch\" on the cluster pods as you can see by the output in the command window:\n\n![Jane user with permission to list pods](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/22-jane-and-perms.png){: .shadow.medium.center.wrap-text}\nJane user with permission to list pods\n{: .note.text-center}\n\nRemember that the service account \"gitlab-agent\" under namespace \"ga4k-agent\" was created earlier when we installed the agent by running the Docker command. In order for the agent to be able to impersonate another service account or user, it needs to have the permissions to do so. 
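\n\nIn sketch form, the RBAC objects involved look roughly like the following minimal version (the resource names match the ones used in this walkthrough; tighten the scoping for real use):\n\n```yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: impersonate\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"users\", \"groups\", \"serviceaccounts\"]\n    verbs: [\"impersonate\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: allowimpersonator\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: impersonate\nsubjects:\n  - kind: ServiceAccount\n    name: gitlab-agent\n    namespace: ga4k-agent\n```\n\n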
We do this by creating a clusterrole \"impersonate\" for impersonating users, groups, and service accounts, and then creating a clusterrolebinding \"allowimpersonator\" to give these permissions for the \"default\" namespace to the agent \"gitlab-agent\" in the \"ga4k-agent\" namespace:\n\n![Giving impersonation permission to agent](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/23-clusterrole-perm-to-agent.png){: .shadow.large.center.wrap-text}\nGiving impersonation permission to agent\n{: .note.text-center}\n\nWe then edit the agent's configuration and add the \"impersonate\" attribute and provide the service account for \"jane\" as the parameter for the \"username\" tag:\n\n![Impersonating a specific user](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/24-user-impersonation.png){: .shadow.medium.center.wrap-text}\nImpersonating a specific user called jane\n{: .note.text-center}\n\nAs we commit the changes, we check the log output to verify that the update has taken place in the running agent:\n\n![Agent updated with user impersonation](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/25-agent-conf-updated.png){: .shadow.large.center.wrap-text}\nAgent updated with user impersonation\n{: .note.text-center}\n\nSince we know that \"jane\" has the permission to list the running pods in the cluster, let's head to the project \"sample-application\" pipeline and add the command \"kubectl get pods --all-namespaces\" to it:\n\n![Adding get pods command that jane is allowed to run](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/26-adding-get-pods-cmd.png){: .shadow.medium.center.wrap-text}\nAdding get pods command that jane is allowed to run\n{: .note.text-center}\n\nWe commit the update and head over to the running pipeline and drill into the \"test\" job log output to see that the context switch was successful and that the kubectl commands executed correctly, including the listing of the running pods in the cluster:\n\n![Job output for the pipeline impersonating jane](https://about.gitlab.com/images/blogimages/cicd-tunnel-impersonate/27-user-impersonation-job-log.png){: .shadow.medium.center.wrap-text}\nJob output for the pipeline impersonating jane\n{: .note.text-center}\n\n## Conclusion\n\nIn this blog post, we reviewed how you can securely access your Kubernetes clusters from your CI/CD pipelines by using generic impersonation. 
In addition, we showed the activity list of the GitLab Agent for Kubernetes, which can help you detect and troubleshoot faulty events from your cluster.\n\nTo see these capabilities in action, check out the following video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/j8SJuHd7Zsw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by Jakob Søby on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[2331,832,937,1002],{"slug":3128,"featured":6,"template":678},"cicd-tunnel-impersonation","content:en-us:blog:cicd-tunnel-impersonation.yml","Cicd Tunnel Impersonation","en-us/blog/cicd-tunnel-impersonation.yml","en-us/blog/cicd-tunnel-impersonation",{"_path":3134,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3135,"content":3140,"config":3145,"_id":3147,"_type":16,"title":3148,"_source":17,"_file":3149,"_stem":3150,"_extension":20},"/en-us/blog/git-fetch-performance",{"title":3136,"description":3137,"ogTitle":3136,"ogDescription":3137,"noIndex":6,"ogImage":2478,"ogUrl":3138,"ogSiteName":692,"ogType":693,"canonicalUrls":3138,"schema":3139},"How we made Git fetch performance improvements in 2021, part 1","Our Scalability team tackled a server CPU utilization issue. Here's the first part of a detailed look at performance improvements we made for Git fetch.","https://about.gitlab.com/blog/git-fetch-performance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we made Git fetch performance improvements in 2021, part 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Vosmaer\"}],\n        \"datePublished\": \"2022-01-20\",\n      }",{"title":3136,"description":3137,"authors":3141,"heroImage":2478,"date":3142,"body":3143,"category":14,"tags":3144},[3103],"2022-01-20","\nIn this post we look back on a series of projects from the Scalability\nteam that improved GitLab server-side efficiency for serving Git fetch\ntraffic. In the benchmark described below we saw a 9x reduction in\nGitLab server CPU utilization. Most of the performance comes from the\nGitaly pack-objects cache, which has proven very effective at reducing\nthe Gitaly server load caused by highly concurrent CI pipelines.\n\nThese changes are not user-visible but they benefit the stability and\navailability of GitLab.com. If you manage a GitLab instance\nyourself you may want to [enable the pack-objects\ncache](https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#pack-objects-cache)\non your instance too.\n\nWe discuss how we achieved these improvements in [part 2](/blog/git-fetch-performance-2021-part-2/).\n\n## Background\n\nWithin the GitLab application, Gitaly is the component that acts as a\nremote procedure call (RPC) server for Git repositories. On\nGitLab.com, repositories are stored on persistent disks attached to\ndedicated Gitaly servers, and the rest of the application accesses\nrepositories by making RPC calls to Gitaly.\n\nIn 2020 we encountered several incidents on GitLab.com caused by the fact that\nour Gitaly server infrastructure [could not\nhandle](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/3013)\nthe Git fetch traffic generated by CI on our own main repository,\n[`gitlab-org/gitlab`](https://gitlab.com/gitlab-org/gitlab). 
The only reason the situation at the time worked\nwas that we had a custom CI caching solution for\n`gitlab-org/gitlab` only, commonly referred to as the \"CI pre-clone\nscript\".\n\n### The CI pre-clone script\n\nThe CI pre-clone script was an implementation of the [clone bundle CI\nfetching\nstrategy](https://www.kernel.org/best-way-to-do-linux-clones-for-your-ci.html).\nWe had originally set up the CI pre-clone script one year earlier, in\n[December 2019](https://gitlab.com/gitlab-org/gitlab/-/issues/39134).\nIt consisted of two parts.\n\n1.   A CI cron job that would clone `gitlab-org/gitlab`, pack up the\n   result into a tarball, and upload it to a known Google Cloud\n   Storage bucket.\n1.   A shell script snippet, stored in the `gitlab-org/gitlab` project settings, that was\n   injected into each `gitlab-org/gitlab` CI job. This shell script\n   would download and extract the latest tarball from the known URL.\n   After that, the CI job did an incremental Git fetch, relative to the\n   tarball contents, to retrieve the actual CI pipeline commit.\n\nThis system was very effective. Our CI pipelines run against shallow\nGit clones of `gitlab-org/gitlab`, which require over 100MB of data to\nbe transferred per CI job. Because of the CI pre-clone script, the\namount of Git data per job was closer to 1MB. The rest of the data was\nalready there because of the tarball. The amount of repository data\ndownloaded by each CI job stayed the same, but only 1% of this data\nhad to come from a Gitaly server. This saved a lot of computation and\nbandwidth on the Gitaly server hosting `gitlab-org/gitlab`.\n\nAlthough this solution worked well, it had a number of downsides.\n\n1.   It was not part of the application and required per-project manual\n   set-up and maintenance.\n1.   It did not work for forks of `gitlab-org/gitlab`.\n1.   It had to be maintained in two places: the project that created the\n   tarball and the project settings of `gitlab-org/gitlab`.\n1.   We had no version control for the download script; this was just\n   text stored in the project's CI settings.\n1.   The download script was fragile. We had one case where we added an\n   `exit` statement in the wrong place, and all `gitlab-org/gitlab`\n   builds started silently using stale checkouts left behind by other\n   pipelines.\n1.   In case of a Google Cloud Storage outage, the full uncached traffic\n   would saturate the Gitaly server hosting `gitlab-org/gitlab`. Such\n   outages are rare but they do happen.\n1.   A user who wanted to copy our solution would have to set up\n   their own Google Cloud Storage bucket and pay the bills for it.\n\nThe biggest issue really was that one year on, the CI pre-clone script\nhad not evolved from a custom one-off solution into an easy-to-use\nfeature for everyone.\n\nWe solved this problem by building the pack-objects cache, which we\nwill describe in more detail in the next blog post. Unlike the CI pre-clone script,\nwhich was a separate component, the pack-objects cache sits inside\nGitaly. It is always on, for all repositories and all users on\nGitLab.com. If you run your own GitLab server you can also use the\npack-objects cache, but you do have to [turn it on\nfirst](https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#pack-objects-cache).\n\n## Performance comparison\n\nTo illustrate what has changed, we have created a benchmark. 
We set up a GitLab\nserver with a clone of `gitlab-org/gitlab` on it, and we configured a\nclient machine to perform 20 simultaneous shallow clones of the same commit using Git HTTP.[^ssh] This\nsimulates having a CI pipeline with 20 parallel jobs. The pack data is\nabout 87MB, so in terms of bandwidth, we are transferring `20 * 87 =\n1740MB` of data.\n\n[^ssh]: As of GitLab 14.6, Git HTTP is 3x more CPU-efficient on the server than Git SSH. We are working on [improving the efficiency of Git SSH in GitLab](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/652). We prioritized optimizing Git HTTP because that is what GitLab CI uses.\n\nWe did this experiment with two GitLab servers. Both were Google\nCompute Engine `c2-standard-8` virtual machines with 8 CPU cores and\n32GB RAM. The operating system was Ubuntu 20.04, and we installed\nGitLab using our Omnibus packages.\n\n### Before\n\n- GitLab FOSS 13.7.9 (released December 2020)\n- Default Omnibus configuration\n\nThe 30-second [Perf flamegraph](https://www.brendangregg.com/FlameGraphs/cpuflamegraphs.html) below was captured at 99Hz across all CPUs.\n\n![Flamegraph of GitLab 13.7 performance](https://about.gitlab.com/images/blogimages/git-fetch-2021/before.jpg)\n\nSource: [SVG](/images/blogimages/git-fetch-2021/before.svg)\n\n### After\n\n- GitLab FOSS 14.6.1 (released December 2021)\n- One extra setting in `/etc/gitlab/gitlab.rb`:\n\n```ruby\ngitaly['pack_objects_cache_enabled'] = true\n```\n\n![Flamegraph of GitLab 14.6 performance with\ncache](https://about.gitlab.com/images/blogimages/git-fetch-2021/after.jpg)\n\nSource: [SVG](/images/blogimages/git-fetch-2021/after.svg)\n\n### Analysis\n\nServer CPU profile distribution:\n\n|Value|Before|After|\n|---|---|---|\n|Benchmark run time|27s|7.5s|\n|`git` profile samples|18 552|923|\n|`gitaly` samples (Git RPC server process)|1 247|331|\n|`gitaly-hooks` samples (pack-objects cache client)||258|\n|`gitlab-workhorse` samples (application HTTP frontend)|1 057|237|\n|`nginx` samples (main HTTP frontend)|474|251|\n|Total CPU busy samples|21 720|2 328|\n|CPU utilization during benchmark|100%|40%|\n\n### Conclusion\n\nCompared to GitLab 13.7 (December 2020), GitLab 14.6 (December 2021) plus the\npack-objects cache makes the CI fetch benchmark in this post run 3.6x faster.\nAverage server CPU utilization during the benchmark dropped from 100%\nto 40%.\n\nStay tuned for part 2 of this blog post, in which we will go over the\nchanges we made to make this happen.\n\n## Related content\n\n- [Gitaly pack-objects cache documentation](https://docs.gitlab.com/ee/administration/gitaly/configure_gitaly.html#pack-objects-cache)\n- [Epic to improve Git SSH efficiency in GitLab](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/652)\n",[702,1286,704],{"slug":3146,"featured":6,"template":678},"git-fetch-performance","content:en-us:blog:git-fetch-performance.yml","Git Fetch Performance","en-us/blog/git-fetch-performance.yml","en-us/blog/git-fetch-performance",{"_path":3152,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3153,"content":3158,"config":3164,"_id":3166,"_type":16,"title":3167,"_source":17,"_file":3168,"_stem":3169,"_extension":20},"/en-us/blog/pipelines-as-code",{"title":3154,"description":3155,"ogTitle":3154,"ogDescription":3155,"noIndex":6,"ogImage":2478,"ogUrl":3156,"ogSiteName":692,"ogType":693,"canonicalUrls":3156,"schema":3157},"Pipelines-as-Code: How to improve speed from idea to production","Pipelines-as-Code streamline automatic building, testing, and deploying of applications using 
prebuilt pipelines and infrastructure components. Here's how it works.","https://about.gitlab.com/blog/pipelines-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pipelines-as-Code: How to improve speed from idea to production\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Williams\"}],\n        \"datePublished\": \"2022-01-18\",\n      }",{"title":3154,"description":3155,"authors":3159,"heroImage":2478,"date":3161,"body":3162,"category":14,"tags":3163},[3160],"Robert Williams","2022-01-18","\nToday’s DevOps platform-centric world is moving steadily towards an \"Everything-as-Code\" mentality. Add in cloud native, and it's clearly even more important to standardize how you define your DevOps processes.\n\n## Why ‘as-Code’?\n\nThanks to faster iteration, cloud native computing, and [microservices-based architectures](https://about.gitlab.com/topics/microservices/), as-Code technologies have become the de facto standard for a lot of different parts of the software development lifecycle. \n\nThe need to release faster requires a single spot for teams to collaborate on any kind of change – code, infrastructure, configuration, networking, or testing. And to implement that change quickly we need to be able to see and review it before it goes into production. \n\nAs-Code solutions are at the core of cloud native technologies such as Kubernetes, where you utilize YAML or JSON formats to configure and manage. Here are the key advantages of 'as-Code':\n\n- auditability\n- scalability\n- efficiency\n- collaboration\n\nThese benefits come into play with every piece of technology that moves into as-Code; we have seen it time and again as DevOps processes mature and we automate each piece of the software development lifecycle. Here are the critical 'as-Code' stages: \n\n### Build-as-Code\n\nOne of the first steps when building a new pipeline is to implement a way to build your application automatically. Containerization is one of the most common ways: You define your build steps as a Dockerfile and then you have automated the build of the application.\n\n### Test-as-Code\n\nAs our deployment frequency and team size scale, the need for test cases to be automated scales as well. So we automate: We write unit tests and scripts to execute them, and then we ensure the changes can be continuously integrated safely, without introducing unplanned bugs.\n\n### Security-as-Code\n\nTo ensure software gets to market quickly, security must be included in your testing process. The testing has to happen either through tools integrated with each individual project or through checks implemented as code, creating job templates for security scanners that can be ingested by projects as required. These steps enable teams to quickly become compliant with various security frameworks (like PCI-DSS, HIPAA, or ISO) as they become relevant for the project.\n\n### Deployment-as-Code\n\nDeployments need to be standardized so they are predictable every time. To ensure successful peer review, production and development environment deployments need to be the same, and there's an added bonus of a quality gate between them. 
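\n\nTo make this concrete, a minimal GitLab CI pipeline that strings these stages together as code might look like the following sketch (the job scripts are placeholders for your own build, test, and deploy steps):\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - docker build -t \"$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA\" .\n\ntest:\n  stage: test\n  script:\n    - ./run-unit-tests.sh # placeholder for your test runner\n\ndeploy:\n  stage: deploy\n  environment: production\n  when: manual # the quality gate between environments\n  script:\n    - ./deploy.sh # placeholder for your deployment tooling\n```\n\n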
Through scripting and implementation of Deployment-as-Code, we end up with the ability to continuously deploy code and continuously deliver value.\n\n## Why Pipelines-as-Code?\n\nPipelines are the center of the CI/CD workflow – they're the automation heart that powers all of the benefits of as-Code technologies. Once you have the Build-as-Code, Test-as-Code, Deployment-as-Code, Infrastructure-as-Code, and Configuration-as-Code, you have all the parts needed to ensure that you can reliably and predictably take your application into production environments. But to move changes in with agility, you need to take all those parts and string them together into a pipeline.\n\nThe technology behind Pipelines-as-Code makes it possible to create centralized repositories for your organization's pipelines. Pipelines-as-Code can be set up to check all the boxes for varied languages and use cases (like [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/)) or with a [number of options](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates) so that developers can pick base pipelines to fit their use case. It's important to have a baseline that conforms to the organization's standards because that always increases the speed to production.\n\nThe entire team can collaborate on changes to each part of the workflow. Version history can be easily maintained in the same version control system as everything else that touches the DevOps lifecycle.\n\nThe benefits of as-Code technology reach a pinnacle with Pipelines-as-Code, so teams gain increases in efficiency, scalability, auditability, and collaboration. Pipelines-as-Code are at the center of automated GitOps, DevOps, and SecOps workflows.\n",[981,873,894],{"slug":3165,"featured":6,"template":678},"pipelines-as-code","content:en-us:blog:pipelines-as-code.yml","Pipelines As Code","en-us/blog/pipelines-as-code.yml","en-us/blog/pipelines-as-code",{"_path":3171,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3172,"content":3178,"config":3183,"_id":3185,"_type":16,"title":3186,"_source":17,"_file":3187,"_stem":3188,"_extension":20},"/en-us/blog/gitops-with-gitlab-using-ci-cd",{"title":3173,"description":3174,"ogTitle":3173,"ogDescription":3174,"noIndex":6,"ogImage":3175,"ogUrl":3176,"ogSiteName":692,"ogType":693,"canonicalUrls":3176,"schema":3177},"GitOps with GitLab: The CI/CD Tunnel","This is the fifth in a series of tutorials on how to do GitOps with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667236/Blog/Hero%20Images/Learn-at-GL.jpg","https://about.gitlab.com/blog/gitops-with-gitlab-using-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: The CI/CD Tunnel\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-01-07\",\n      }",{"title":3173,"description":3174,"authors":3179,"heroImage":3175,"date":3180,"body":3181,"category":14,"tags":3182},[2014],"2022-01-07","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. 
You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nIn this article, we will see how you can access a Kubernetes cluster using GitLab CI/CD and why you might want to do it even if you aim for [GitOps](/topics/gitops/).\n\n## Prerequisites\n\nThis post assumes that you have a Kubernetes cluster connected to GitLab using the GitLab Kubernetes Agent. If you don't have such a cluster, I recommend consulting the previous posts (linked above) to have a similar setup from where we will start today.\n\n## Meet the CI/CD Tunnel\n\nThe GitLab Kubernetes Agent is not just a GitOps tool that will enable pull-based deployments and be one more application to maintain beside the other 70 in your DevOps stack. The GitLab Kubernetes Agent aims to serve the GitLab vision of providing you a single application for the whole DevSecOps lifecycle. As a result, the Agent's goal is to provide an integrated experience with every relevant GitLab feature.\n\nWhat GitLab features does the Agent integrate with today?\n\n- GitLab CI/CD\n- Container network security\n- Container host security\n- Container scanning\n\nIn this post, we will focus on the GitLab CI/CD integration. Given the power and flexibility of GitLab CI/CD, the majority of our users have been using it for years successfully and, until the Agent appeared, they often had to manually script their cluster connections and deployments into it. If the previous setup sounds familiar, I recommend checking out the Agent's CI/CD integration features, the CI/CD tunnel. The CI/CD tunnel enables a cluster connection to be used from GitLab CI/CD, thus you need only minor adjustments to your existing setup, and will receive a GitLab supported component that we are continuously expanding to provide more and more integrations on top of it.\n\nThe CI/CD tunnel is always enabled in the project where you register and configure the Agent, and the given connection can be shared by other groups and projects, too. This way, a single connection can be reused throughout the organization to save on resource and maintenance costs.\n\nGitLab automatically injects the available Kubernetes contexts into the CI/CD runner environment's `KUBECONFIG`. As a result, you can activate a context and start using it without much setup.\n\n## How to configure the CI/CD tunnel\n\nAs already mentioned, the CI/CD tunnel is always enabled in the project where you register and configure the Agent. If you would like to use the tunnel in the same repository, no configuration is needed. If you would like to share the connection with other repositories, open your agent configuration file and add the following lines:\n\n```yaml\nci_access:\n   projects:\n   - id: path/to/project\n   groups:\n   - id: path/to/group\n```\n\nChange the placeholder paths here to your project or group path. Sharing a connection with a group enables access to all the projects within that group. Once you save the configuration file, you can turn your attention to your application project repository, and use the following job to list and select an agent:\n\n```yaml\ndeploy:\n   image:\n     name: bitnami/kubectl:latest\n     entrypoint: [\"\"]\n   script:\n   - kubectl config get-contexts \n   - kubectl config use-context path/to/agent-configuration-project:your-agent-name\n```\n\n## How to install GitLab integrated applications into your cluster\n\nAs an application of the above, let's install some applications into the cluster. 
As various GitLab features require applications in your cluster to be installed and configured for GitLab, GitLab provides a cluster management project template to help you get started. You can easily install these GitLab-integrated applications into your clusters using this template. Let's see how to use it with the CI/CD tunnel and the Agent!\n\n### Create the cluster management project\n\nFirst, let's create a new GitLab project using the \"Cluster Management Project\" template. Open the [create new project from template page](https://gitlab.com/projects/new#create_from_template), search for \"GitLab Cluster Management\", and start a new project with that template.\n\nYou will receive a project that already contains quite a lot of things! It comes with a ready-made `.gitlab-ci.yml` file and a [helmfile](https://github.com/roboll/helmfile)-based setup for 11 applications that integrate with various GitLab functionalities. [Each application might require different configurations](https://docs.gitlab.com/ee/user/clusters/management_project_template.html#built-in-applications). You can read about these in the linked documentation.\n\nAs part of this article, we will install NGINX Ingress and GitLab Runners using the cluster management project.\n\n### How to share the CI/CD tunnel\n\nThis newly created project needs access to one of your clusters. Let's share an Agent's connection with this project as described above. Edit your agent configuration file and add:\n\n```yaml\nci_access:\n   projects:\n   - id: path/to/your/cluster/management/project\n```\n\n### Pick the right Kubernetes context\n\nThe CI/CD tunnel is already available from within your cluster management project. We tried to make it simple to start using a cluster connection without the need to edit `.gitlab-ci.yml`. For simple setups, you can just set a `KUBE_CONTEXT` environment variable with the path to and name of your agent.\n\nSet an environment variable under \"Settings\" / \"CI/CD\" / \"Variables\":\n\n![KUBE_CONTEXT variable setup](https://about.gitlab.com/images/blogimages/2022-01-07-gitops-with-gitlab-using-ci-cd/KUBE_CONTEXT_setting.png)\n\n### How to install NGINX Ingress\n\nWe are ready to install any of the supported applications using this agent connection! Let's start by installing NGINX Ingress, as it does not require any application-specific configuration.\n\nIn your cluster management project, edit `helmfile.yaml` and uncomment the line that points to the `ingress` application. Commit the changes and wait for GitLab magic to happen!\n\nThis was really easy!\n\n### How to install GitLab Runner\n\nAs GitLab Runner is more integrated with GitLab, it needs a little bit of configuration. [The Runner should know](https://docs.gitlab.com/ee/user/infrastructure/clusters/manage/management_project_applications/runner.html#required-variables) where it can find your GitLab instance and needs a token to authenticate with GitLab.\n\nTo make it simple for you to install a Runner fleet, you can configure these as environment variables. By default, the `CI_SERVER_URL` variable is used to specify the GitLab URL. You can overwrite this if needed. For the token, you should create `GITLAB_RUNNER_REGISTRATION_TOKEN` as a masked and protected environment variable with the value of your Runner registration token. 
Feel free to use either a project or a group registration token.\n\nFinally, as with the Ingress installation, uncomment the related line in the `helmfile.yaml`.\n\n## The full potential of the cluster management project\n\nThe cluster management project you created is yours. Thus, you are free to change it, extend it, or get rid of it. In this section, I would like to share with you a few ideas of how you might benefit the most from it.\n\n### Did you move away from Helm v2 already?\n\nThe `.gitlab-ci.yml` file in the cluster management project has a job that helps users upgrade their Helm v2 installations to v3. If you never had these applications installed through a cluster management project with Helm v2, then you don't need that job. Feel free to delete it from your CI yaml.\n\n### Extend the project with your own apps\n\nThe cluster management project is self-contained as is. You can add your own helm/helmfile-based application setups to it. To get started, I recommend checking out the [helmfile](https://github.com/roboll/helmfile) README.\n\n### Stay up to date\n\nWe want you to own the cluster management project, so you can upgrade the applications independently of GitLab releases. Still, you might prefer to follow GitLab releases, too, as you can expect improvements to the cluster management project template. How can you do that?\n\nIf you followed the `kpt`-based Agent installation setup, you know that `kpt` can check out a git subtree and merge local changes with upstream changes when you request an update. You can use `kpt` here, too! \n\nAs you manage the cluster management project, you can replace selected applications with their `kpt` checkouts. For example, you can start following the upstream template with:\n\n```bash\ncd applications\nrm -rf prometheus\nkpt pkg get https://gitlab.com/gitlab-org/project-templates/cluster-management.git/applications/prometheus prometheus\n```\n\nand update to the most recent version by running:\n\n```bash\nkpt pkg update applications/prometheus\n```\n\n## Recap\n\nAs we have seen in this article, the GitLab Kubernetes Agent provides way more possibilities than focused GitOps tools do. Besides supporting pull-based deployments, we support GitLab users in integrating the Agent into their existing CI/CD-based workflows. 
Moreover, GitLab ships a Cluster Management Project template that supplements the various GitLab integrations and simplifies getting started with them.\n\n## What's next\n\nBuilding on our knowledge of the CI/CD tunnel, in the next article we will look into how to use Auto DevOps with the Agent.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n",[1002,894,727],{"slug":3184,"featured":6,"template":678},"gitops-with-gitlab-using-ci-cd","content:en-us:blog:gitops-with-gitlab-using-ci-cd.yml","Gitops With Gitlab Using Ci Cd","en-us/blog/gitops-with-gitlab-using-ci-cd.yml","en-us/blog/gitops-with-gitlab-using-ci-cd",{"_path":3190,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3191,"content":3197,"config":3203,"_id":3205,"_type":16,"title":3206,"_source":17,"_file":3207,"_stem":3208,"_extension":20},"/en-us/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase",{"title":3192,"description":3193,"ogTitle":3192,"ogDescription":3193,"noIndex":6,"ogImage":3194,"ogUrl":3195,"ogSiteName":692,"ogType":693,"canonicalUrls":3195,"schema":3196},"How to bring DevOps to the database with GitLab and Liquibase","Learn how to build a continuous delivery pipeline for database code changes with this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672677/Blog/Hero%20Images/metalgears_databasecasestudy.jpg","https://about.gitlab.com/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to bring DevOps to the database with GitLab and Liquibase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tsvi Zandany\"}],\n        \"datePublished\": \"2022-01-05\",\n      }",{"title":3192,"description":3193,"authors":3198,"heroImage":3194,"date":3200,"body":3201,"category":14,"tags":3202},[3199],"Tsvi Zandany","2022-01-05","\nIn the [Accelerate State of DevOps 2021 Report](https://cloud.google.com/devops/state-of-devops/), the DevOps Research and Assessment (DORA) team reveals “elite DevOps performers are 3.4 times more likely to exercise database change management compared to their low-performing counterparts.” Tracking changes with version control is not just for application code, though. It’s crucial for managing changes to one of your most important assets: your database.\n\nThe GitLab DevOps platform enables database management teams to leverage CI/CD to track, manage, and deploy database changes, along with application development and automation and infrastructure as code. Database change management tools have become more advanced in recent years, supporting easier collaboration and communication, which are the keys to successful DevOps. In this blog post, I’ll take you through a tutorial using [Liquibase](https://www.liquibase.com), a tool that integrates seamlessly into the GitLab DevOps platform so your teams can deliver database code changes as fast as application code changes (without compromising on quality and security). \n\n## What is Liquibase?\n\nLiquibase was founded as an open source project over 15 years ago to address getting database changes into version control. With more than 75 million downloads of the open source project, the company behind Liquibase expanded to paid editions and support to help teams release software faster and safer by bringing the database change process into their existing CI/CD automation. 
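\n\nIf you have not seen Liquibase before, its unit of change is a \"changeset\" tracked in a changelog file. A minimal changelog in YAML form might look like the following sketch (the table and author are illustrative; Liquibase also accepts XML, JSON, and SQL changelogs):\n\n```yaml\ndatabaseChangeLog:\n  - changeSet:\n      id: 1\n      author: example.author\n      changes:\n        - createTable:\n            tableName: customers\n            columns:\n              - column:\n                  name: id\n                  type: int\n                  constraints:\n                    primaryKey: true\n              - column:\n                  name: name\n                  type: varchar(255)\n```\n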
\n\nIntegrating Liquibase with GitLab CI/CD enables database teams to leverage DevOps automation and best practices for database management. Liquibase helps teams build automated database scripts and gain insights into when, where, and how database changes are deployed. In this tutorial, we’ll demonstrate how to check database scripts for security and compliance issues, speed up database code reviews, perform easy rollbacks, and provide database snapshots to check for malware.\n\n## Adding Liquibase to GitLab’s DevOps Platform\n\nTeams can add Liquibase to GitLab to enable true CI/CD for the database. It’s easy to integrate Liquibase into your GitLab CI/CD pipeline. Before jumping into the tutorial, let’s take a look at the [example Liquibase GitLab project repository](https://gitlab.com/gitlab-com/alliances/liquibase/sandbox-projects/sql_server) you’ll be using.\n\n### Understanding the example Liquibase GitLab project repository\n\n![A CI/CD pipeline diagram](https://about.gitlab.com/images/blogimages/1_CICD_Pipeline_Diagram.png){: .shadow.small.center}\n\nFor this example, the GitLab CI/CD pipeline environments include DEV, QA, and PROD. This pipeline goes through several stages: build, test, deploy, and compare. A post stage comes into play later to capture a snapshot of your database in Production. The pipeline stages are declared as:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n  - compare\n```\n\n### Liquibase commands in the pipeline\n\nFor each of the predefined jobs in the GitLab repository, you’ll be using several Liquibase commands to help manage database changes quickly and safely:\n\n```\nliquibase_job:\n\n  before_script:\n    - functions\n    - isUpToDate\n    - liquibase checks run\n    - liquibase updateSQL\n    - liquibase update\n    - liquibase rollbackOneUpdate --force\n    - liquibase tag $CI_PIPELINE_ID\n    - liquibase --logFile=${CI_JOB_NAME}_${CI_PIPELINE_ID}.log --logLevel=info update\n    - liquibase history\n\n  script:\n    - echo \"Comparing databases DEV --> QA\"\n    - liquibase diff\n    - liquibase --outputFile=diff_between_DEV_QA.json diff --format=json\n\n  script:\n    - echo \"Snapshotting database PROD\"\n    - liquibase --outputFile=snapshot_PROD.json snapshot --snapshotFormat=json\n```\n\n(The two `script` sections shown here belong to different jobs: the compare job and the snapshot job, respectively.)\n\nLearn more about each of these commands in the [README file in the GitLab repository](https://gitlab.com/gitlab-com/alliances/liquibase/sandbox-projects/sql_server/-/blob/main/README.md). \n\n## Tutorial\n\nThe following tutorial demonstrates how to run Liquibase in a GitLab CI/CD pipeline. Follow along by watching this companion video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZBFhDayoRYo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Prerequisites\n\nTo start, I’m using a Linux machine with the following:\n\n- [A GitLab account](https://www.gitlab.com)\n- A self-managed GitLab Runner on a Linux machine\n- Git\n- Java 11\n- Access to a SQL Server database with multiple environments\n\n### Download, install, and configure Liquibase\n\n[Download Liquibase v4.6.1+](https://www.liquibase.org/download)\n\n[Install Liquibase](https://docs.liquibase.com/concepts/installation/installation-linux-unix-mac.html)\n\n[Get a free Liquibase Pro license key](https://www.liquibase.com/trial). No credit card is required, so you can play with all the advanced features and get support for 30 days. 
You’ll use this key later when you configure environment variables within GitLab.\n\nEnsure Liquibase is installed properly by running the `liquibase --version` command. If everything is good, you’ll see output like the following:\n\n```\nStarting Liquibase at 18:10:06 (version 4.6.1 #98 built at 2021-11-04 20:16+0000)\nRunning Java under /usr/lib/jvm/java-11-openjdk-11.0.13.0.8-1.el7_9.x86_64 (Version 11.0.13)\n\nLiquibase Version: 4.6.1\nLiquibase Community 4.6.1 by Liquibase\n```\n\n### Prepare your GitLab project\n\nFork this [example GitLab project repository](https://gitlab.com/gitlab-com/alliances/liquibase/sandbox-projects/sql_server). ([See more information about forking a repository](https://docs.gitlab.com/ee/user/project/repository/forking_workflow.html).)\n\n[Create a self-managed GitLab Runner](https://docs.gitlab.com/runner/) on your Linux instance with your newly forked GitLab project.\n\nClone your newly forked project repository:\n\n```\ngit clone https://gitlab.com/\u003Cusername>/sql_server.git\n```\n\nGo to the “sql_server” project folder:\n\n```\ncd sql_server\n```\n\nRun the following command to change your git branch to staging:\n\n```\ngit checkout staging\n```\n\nNext, configure the GitLab CI/CD pipeline environment variables.\n\nYour configuration will include [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-a-project), [Liquibase properties](https://www.liquibase.com/blog/secure-database-developer-flow-using-gitlab-pipelines), database credentials, and the Liquibase Pro trial license key so you can use all the advanced Liquibase commands.\n\nFrom the main sql_server project, go to Settings → CI/CD.\n\nUnder Variables, click Expand and add the following variables:\n\n![The CI/CD variable settings](https://about.gitlab.com/images/blogimages/liquibasevariables.png){: .shadow.small.center}\n\n![More CI/CD variable settings](https://about.gitlab.com/images/blogimages/liquibasevariables2.png){: .shadow.small.center}\n\n### Configure the self-managed GitLab runner\n\nFrom the main sql_server project, go to Settings → CI/CD.\n\nExpand the runners section, click the pencil edit icon, and add the following runner tags (comma separated): `dev_db,prod_db,test_db`\n\nNote: Tags are created to help choose which runner will do the job. In this example, we are associating all tags with one runner. Learn more about [configuring runners](https://docs.gitlab.com/ee/ci/runners/configure_runners.html). 
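\n\nTo see how a tag routes work to a runner, here is a minimal sketch of a `.gitlab-ci.yml` job (the job name and script are hypothetical, not taken from the example repository); only a runner registered with a matching tag will pick it up:\n\n```\ndeploy-dev:\n  stage: deploy\n  tags:\n    - dev_db   # only runners registered with this tag run the job\n  script:\n    - liquibase update\n```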
\n\n### Make changes to the database\n\nEdit the changelog.sql file and add the following changeset after the `liquibase formatted sql` header:\n\n```\n-- changeset SteveZ:createTable_salesTableZ\nCREATE TABLE salesTableZ (\n   ID int NOT NULL,\n   NAME varchar(20) COLLATE SQL_Latin1_General_CP1_CI_AS NULL,\n   REGION varchar(20) COLLATE SQL_Latin1_General_CP1_CI_AS NULL,\n   MARKET varchar(20) COLLATE SQL_Latin1_General_CP1_CI_AS NULL\n)\n--rollback DROP TABLE salesTableZ\n```\n\nAdd, commit, and push all new database changes:\n\n```\ngit add changelog.sql\ngit commit -m \"added changelog id and a create table salesTableZ changeset\"\ngit push -u origin staging\n```\n\n### Merge the changes and run the pipeline\n\nLet’s merge the changes from branch staging → main to trigger the pipeline to run all jobs.\n\n1. Click Merge requests → New merge request.\n1. Select staging as Source branch and main as Target branch.\n1. Click Compare branches and continue.\n1. On the next screen, click Create merge request.\n1. Click Merge to finish merging the changes.\n\n![A look at the merge request](https://about.gitlab.com/images/blogimages/2_Merge_Request1.png){: .shadow.small.center}\n\n![Another look at the merge request](https://about.gitlab.com/images/blogimages/3_Merge_Request2.png){: .shadow.small.center}\n\nOnce these steps are completed, the code is merged into main and the pipeline is triggered to run.\n\n![The pipeline is triggered](https://about.gitlab.com/images/blogimages/4_Merge_Request3.png){: .shadow.small.center}\n\nTo see the pipeline running, click Pipelines.\n\nTo view the pipeline progress, click the pipeline ID link. You can view each job’s log output by clicking on each job name.\n\n![The pipeline in progress](https://about.gitlab.com/images/blogimages/5_Pipeline_Progress.png){: .shadow.small.center}\n\nClicking into the build-job example:\n\nThe `liquibase checks run` command validates the SQL for any violations.\n\n```\nStarting Liquibase at 22:19:14 (version 4.6.1 #98 built at 2021-11-04 20:16+0000)\nLiquibase Version: 4.6.1\nLiquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon Jun 27 04:59:59 UTC 2022\nExecuting Quality Checks against changelog.sql\nExecuting all checks because a valid Liquibase Pro license was found!\nChangesets Validated:\n  ID: createTable_salesTableZ; Author: SteveZ; File path: changelog.sql\nChecks run against each changeset:\n  Warn on Detection of 'GRANT' Statements\n  Warn on Detection of 'REVOKE' Statements\n  Warn when 'DROP TABLE' detected\n  Warn when 'DROP COLUMN' detected\n  Check for specific patterns in sql (Short Name: SqlCreateRoleCheck)\n  Warn when 'TRUNCATE TABLE' detected\n  Warn on Detection of grant that contains 'WITH ADMIN OPTION'\nLiquibase command 'checks run' was executed successfully.\n```\n\nThe `liquibase update` command deploys the changes. If you choose, you can view a full report of your changes in [Liquibase Hub](https://docs.liquibase.com/tools-integrations/liquibase-hub/setup.html). 
The update command also saves the deployment log output file as an artifact.\n\n```\nStarting Liquibase at 22:19:34 (version 4.6.1 #98 built at 2021-11-04 20:16+0000)\nLiquibase Version: 4.6.1\nLiquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon Jun 27 04:59:59 UTC 2022\n----------------------------------------------------------------------\nView a report of this operation at https://hub.liquibase.com/r/I7ens13ooM\n* IMPORTANT: New users of Hub first need to Sign In to your account\nwith the one-time password sent to your email, which also serves as\nyour username.\n----------------------------------------------------------------------\nLogs saved to /home/gitlab-runner/builds/3-UvD4aX/0/szandany/sql_server/build-job_405710044.log\nLiquibase command 'update' was executed successfully.\n```\n\nHere’s what your Liquibase Hub report will look like:\n\n![The hub report, part one](https://about.gitlab.com/images/blogimages/6_LiquibaseHub_Report.png){: .shadow.small.center}\n\n![The hub report, part two](https://about.gitlab.com/images/blogimages/7_LiquibaseHub_Report.png){: .shadow.small.center}\n\nThe `liquibase history` command will show what changes are currently in the database.\n\n```\nStarting Liquibase at 22:19:40 (version 4.6.1 #98 built at 2021-11-04 20:16+0000)\nLiquibase Version: 4.6.1\nLiquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon Jun 27 04:59:59 UTC 2022\nLiquibase History for jdbc:sqlserver://localhost:1433;sendTemporalDataTypesAsStringForBulkCopy=true;delayLoadingLobs=true;useFmtOnly=false;useBulkCopyForBatchInsert=false;cancelQueryTimeout=-1;sslProtocol=TLS;jaasConfigurationName=SQLJDBCDriver;statementPoolingCacheSize=0;serverPreparedStatementDiscardThreshold=10;enablePrepareOnFirstPreparedStatementCall=false;fips=false;socketTimeout=0;authentication=NotSpecified;authenticationScheme=nativeAuthentication;xopenStates=false;sendTimeAsDatetime=true;trustStoreType=JKS;trustServerCertificate=false;TransparentNetworkIPResolution=true;serverNameAsACE=false;sendStringParametersAsUnicode=true;selectMethod=direct;responseBuffering=adaptive;queryTimeout=-1;packetSize=8000;multiSubnetFailover=false;loginTimeout=15;lockTimeout=-1;lastUpdateCount=true;encrypt=false;disableStatementPooling=true;databaseName=DEV;columnEncryptionSetting=Disabled;applicationName=Microsoft JDBC Driver for SQL Server;applicationIntent=readwrite;\n- Database updated at 11/9/21, 10:19 PM. Applied 1 changeset(s), DeploymentId: 6496372605\n  liquibase-internal::1636496372758::liquibase\n- Database updated at 11/9/21, 10:19 PM. Applied 1 changeset(s), DeploymentId: 6496375151\n  changelog.sql::createTable_salesTableZ::SteveZ\nLiquibase command 'history' was executed successfully.\n```\n\n### Clicking into the DEV->QA job example from your pipeline\n\nWe run the `liquibase diff` command to compare the DEV and QA databases. This helps detect any drift between the databases.\n\nNotice in the log output that there are some unexpected changes:\n\n- a table named `bad_table`\n- a procedure named `bad_proc`\n\n![The diff report](https://about.gitlab.com/images/blogimages/8_LiquibaseDiff_Report.png){: .shadow.small.center}\n\nBy using the [Liquibase Pro trial license key](https://www.liquibase.com/trial), you’re able to detect any stored logic objects included in the diff report. 
Liquibase Pro also allows you to generate a parsable JSON output file and save it as an artifact for later use.\n\n```\nStarting Liquibase at 22:21:10 (version 4.6.1 #98 built at 2021-11-04 20:16+0000)\nLiquibase Version: 4.6.1\nLiquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon Jun 27 04:59:59 UTC 2022\nOutput saved to /home/gitlab-runner/builds/3-UvD4aX/0/szandany/sql_server/diff_between_DEV_QA.json\nLiquibase command 'diff' was executed successfully.\n```\n\nJSON artifact output file example:\n\n```\n{\n    \"diff\": {\n        \"diffFormat\": 1,\n        \"created\": \"Wed Dec 08 20:16:53 UTC 2021\",\n        \"databases\": {\n            \"reference\": {\n                \"majorVersion\": \"14\",\n                \"minorVersion\": \"00\",\n                \"name\": \"Microsoft SQL Server\",\n                \"url\": \"jdbc:sqlserver://localhost:1433;databaseName=DEV; ...\"\n            },\n            \"target\": {\n                \"majorVersion\": \"14\",\n                \"minorVersion\": \"00\",\n                \"name\": \"Microsoft SQL Server\",\n                \"url\": \"jdbc:sqlserver://localhost:1433;databaseName=QA; ...\"\n            }\n        },\n        \"unexpectedObjects\": [\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"bad_proc\",\n                    \"type\": \"storedProcedure\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"bad_table\",\n                    \"type\": \"table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"MARKET\",\n                    \"type\": \"column\",\n                    \"relationName\": \"bad_table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"ID\",\n                    \"type\": \"column\",\n                    \"relationName\": \"bad_table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"NAME\",\n                    \"type\": \"column\",\n                    \"relationName\": \"bad_table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            },\n            {\n                \"unexpectedObject\": {\n                    \"name\": \"REGION\",\n                    \"type\": \"column\",\n                    \"relationName\": \"bad_table\",\n                    \"schemaName\": \"dbo\",\n                    \"catalogName\": \"QA\"\n                }\n            }\n        ],\n        \"changedObjects\": [\n            {\n                \"changedObject\": {\n                    \"name\": \"QA\",\n                    \"type\": \"catalog\",\n                    \"differences\": [\n                        {\n                            \"difference\": {\n                                \"comparedValue\": \"QA\",\n                                \"field\": \"name\",\n                                \"message\": \"name changed from 
'DEV' to 'QA'\",\n                                \"referenceValue\": \"DEV\"\n                            }\n                        }\n                    ]\n                }\n            }\n        ]\n    }\n}\n\n```\n\nNote that the [Liquibase diffChangelog](https://docs.liquibase.com/commands/diffchangelog.html) can help any baseline environments that have drifted. \n\nClicking into the snapshot PROD job example, the snapshot file contains all the current schema changes represented in a JSON file. You can obtain the PROD database snapshot file to compare two states of the same database to protect against malware with drift detection.\n\n```\n58Starting Liquibase at 22:21:32 (version 4.6.1 #98 built at 2021-11-04 20:16+0000)\n59Liquibase Version: 4.6.1\n60Liquibase Pro 4.6.1 by Liquibase licensed to customersuccess until Mon Jun 27 04:59:59 UTC 2022\n61Output saved to /home/gitlab-runner/builds/3-UvD4aX/0/szandany/sql_server/snapshot_PROD.json\n62Liquibase command 'snapshot' was executed successfully. \n64Uploading artifacts for successful job00:01\n70Cleaning up project directory and file based variables00:00\n72Job succeeded\n```\n\n### Congratulations! The pipeline ran successfully.\n\nIf all the jobs are successful, you’ll see a green checkmark right next to each one.\n\nHere’s what your database changes will look like with a database SQL query tool.\n\n![The database](https://about.gitlab.com/images/blogimages/9_Database_Changes_SQL_Query_Tool.png){: .shadow.small.center}\n\n## Summing it up\n\nYou’ve now successfully run Liquibase in a GitLab pipeline to enable true CI/CD for the database. You can easily keep adding more changes to the database by adding more Liquibase changesets to the changelog, commit them to GitLab version control, and repeat the merge request process described in this tutorial to add the changes. \n\nStill have questions or want support integrating Liquibase with your Gitlab CI/CD Pipeline? Our team of database DevOps experts is happy to help! 
\n\n[Contact Liquibase](https://www.liquibase.com/contact)\n\n[Contact GitLab](/sales/)\n\nContact a [certified GitLab channel partner](https://partners.gitlab.com/English/directory/)\n\nContact a [Liquibase channel partner](https://www.liquibase.com/partners)\n\nOther useful links:\n\n[GitLab CI/CD setup Liquibase documentation](https://docs.liquibase.com/concepts/installation/setup-gitlab-cicd.html)\n\n[GitLab - Liquibase repository](https://gitlab.com/gitlab-com/alliances/liquibase/sandbox-projects/liquibasegitlabcicd/-/blob/master/README.md)\n\nGet a [speedy, secure database developer flow](https://www.liquibase.com/blog/secure-database-developer-flow-using-gitlab-pipelines) using GitLab pipelines & Liquibase\n\n_Author Tsvi Zandany is a Senior Solutions Architect at Liquibase._\n",[232,832,937],{"slug":3204,"featured":6,"template":678},"how-to-bring-devops-to-the-database-with-gitlab-and-liquibase","content:en-us:blog:how-to-bring-devops-to-the-database-with-gitlab-and-liquibase.yml","How To Bring Devops To The Database With Gitlab And Liquibase","en-us/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase.yml","en-us/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase",{"_path":3210,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3211,"content":3216,"config":3221,"_id":3223,"_type":16,"title":3224,"_source":17,"_file":3225,"_stem":3226,"_extension":20},"/en-us/blog/gitlab-chart-works-towards-kubernetes-1-22",{"title":3212,"description":3213,"ogTitle":3212,"ogDescription":3213,"noIndex":6,"ogImage":2807,"ogUrl":3214,"ogSiteName":692,"ogType":693,"canonicalUrls":3214,"schema":3215},"GitLab Chart works towards Kubernetes 1.22","New minimum version is 1.19 for in-chart NGINX Ingress Controller.","https://about.gitlab.com/blog/gitlab-chart-works-towards-kubernetes-1-22","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Chart works towards Kubernetes 1.22\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-17\",\n      }",{"title":3212,"description":3213,"authors":3217,"heroImage":2807,"date":3218,"body":3219,"category":14,"tags":3220},[890],"2021-12-17","\n\nWe are working to make the GitLab Chart and the GitLab Operator support Kubernetes 1.22, which requires updating the NGINX Ingress Controller used within the Chart and Operator.\n\nThis update requires that we drop support for versions of Kubernetes prior to 1.19 if using the in-chart NGINX Ingress Controller. Users that still require support for Kubernetes 1.18 and prior releases will only be able to deploy up to Chart version 5.5.x.\n\n## More details on the changes\n\nGitLab uses a [forked version](https://docs.gitlab.com/charts/charts/nginx/fork.html) of the community-supported ingress-nginx Chart to expose the GitLab components via Ingresses. \n\nSupporting Kubernetes 1.22 requires updating the included NGINX Ingress Controller to [version 1.0.4](https://github.com/kubernetes/ingress-nginx/releases/tag/controller-v1.0.4) in order to support the networking.k8s.io/v1 API in Kubernetes 1.22. The previous networking API (networking.k8s.io/v1beta1) had been deprecated since Kubernetes 1.19 and was removed in Kubernetes 1.22.\n\nAs a result of the upgrade, we are bound to the NGINX Ingress Controller's breaking change, which removes support for Kubernetes versions before 1.19. 
They provide more clarification in [their FAQ](https://kubernetes.github.io/ingress-nginx/#faq-migration-to-apiversion-networkingk8siov1).\n\nThe forked ingress-nginx Chart is based on [version 4.0.6](https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx/4.0.6) of ingress-nginx/ingress-nginx, which uses [version 1.0.4](https://github.com/kubernetes/ingress-nginx/releases/tag/controller-v1.0.4) of the NGINX Ingress Controller.\n\n## Who is impacted\n\nAny deployment making use of the NGINX Ingress Controller provided by the GitLab Chart. This covers most, though far from all, users of our Helm Chart and Operator. If you are using an alternate Ingress provider (such as AWS ALB, Azure Application Gateway, or Google GCE Ingress), you will not be affected.\n\n## What to expect\n\nWe recognize that this change may have unintended effects, but most GitLab instances will transition seamlessly to the new NGINX Ingress Controller without incident. As always, we recommend creating a backup prior to upgrading the GitLab Chart or GitLab Operator, so that your data is safeguarded should a recovery become necessary due to complications in the upgrade.\n\nDepending upon the environment and/or cloud provider, it is possible that, when the NGINX Ingress Controller is replaced during the upgrade process, the IP addresses associated with the Ingresses may change. This may require that the DNS records for the GitLab instance be updated if a controller such as external-dns is not managing the DNS records. The DNS records related to the following Ingress objects may be affected:\n\n* gitlab.\n* registry.\n* minio. (if used)\n* kas. (if used)\n\nIf the GitLab Pages component is enabled, there may be other DNS records that will need to be updated to connect to the proper Ingress.\n\n## What if there is a problem with the upgrade?\n\nWhile it is not expected that an upgrade will cause a problem, not all environments or configurations can be anticipated. In the event that there is an upgrade problem, please contact GitLab Support if you are a licensed customer. 
If you are running the Community Edition of GitLab, please open an issue in the [GitLab Chart](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/new?issue%5Bmilestone_id%5D=) or [GitLab Operator](https://gitlab.com/gitlab-org/cloud-native/gitlab-operator/-/issues/new?issue%5Bmilestone_id%5D=) projects.\n",[894,727,1002],{"slug":3222,"featured":6,"template":678},"gitlab-chart-works-towards-kubernetes-1-22","content:en-us:blog:gitlab-chart-works-towards-kubernetes-1-22.yml","Gitlab Chart Works Towards Kubernetes 1 22","en-us/blog/gitlab-chart-works-towards-kubernetes-1-22.yml","en-us/blog/gitlab-chart-works-towards-kubernetes-1-22",{"_path":3228,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3229,"content":3235,"config":3240,"_id":3242,"_type":16,"title":3243,"_source":17,"_file":3244,"_stem":3245,"_extension":20},"/en-us/blog/how-to-automate-localization-for-flutter-apps",{"title":3230,"description":3231,"ogTitle":3230,"ogDescription":3231,"noIndex":6,"ogImage":3232,"ogUrl":3233,"ogSiteName":692,"ogType":693,"canonicalUrls":3233,"schema":3234},"How to automate localization for Flutter apps","Follow this tutorial to learn how to simplify the localization process on GitLab with Localizely.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679465/Blog/Hero%20Images/flutterbanner.png","https://about.gitlab.com/blog/how-to-automate-localization-for-flutter-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate localization for Flutter apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-10\",\n      }",{"title":3230,"description":3231,"authors":3236,"heroImage":3232,"date":3237,"body":3238,"category":14,"tags":3239},[890],"2021-12-10","\n\nLocalization is an indispensable part of today's software. Almost all successful companies strive to adapt their products to different languages, regions, and cultures. Customer satisfaction is crucial for business. However, that often comes at a cost in terms of the higher complexity of software development and maintenance. In addition to regular activities, you must also take care of translation, its synchronization with development processes, and the like.\n\nThe question is: Can we somehow simplify the localization process and make it more agile? The answer is \"yes.\" Below, you can see how GitLab and the [Localizely](https://localizely.com/) platform can help. For that purpose, we will use a simple Flutter project. However, the same approach can be applied to other programming languages and frameworks.\n\n## A few words about the Flutter project\n\nFlutter is an open-source framework developed by Google for building multi-platform apps from a single codebase. It has become quite popular lately, as it solves some things, such as hot reload and performance, much better than other solutions. Since the point of this post is the automation of localization, we will not deal with Flutter too much. But we will certainly highlight some important things regarding localization in Flutter projects.\n\nWhatever approach you used to create and localize your Flutter project, its structure will probably be similar to the one below. \n\n![Flutter project structure](https://about.gitlab.com/images/blogimages/fluttergraphic.png){: .shadow.small.left}\n\nAbove, you can see the l10n folder with the two [ARB](https://localizely.com/flutter-arb/) files. 
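For a concrete picture, here is a minimal sketch of what an intl_en.arb file might contain (the message name and description are invented for this illustration):\n\n```\n{\n  \"@@locale\": \"en\",\n  \"pageHomeTitle\": \"Welcome!\",\n  \"@pageHomeTitle\": {\n    \"description\": \"Title shown on the home page\"\n  }\n}\n```\n\n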
Each ARB file contains translations for one language in the Flutter project (e.g. intl_de.arb for German and intl_en.arb for English). Whenever we want to add, modify, or remove a translation, we need to update those files. In other words, those files are the basis of localization in Flutter projects. They separate programming from translation but require synchronization with your code so that each message has a corresponding translation.\n\n## The usual way of localization\n\nThere is no exact rule or process that describes the usual way of localization. However, we could roughly describe it as a routine of a few steps:\n\n1. The developer updates code and the main ARB file.\n2. The developer sends ARB files to the project manager.\n3. The project manager sends ARB files to translators (e.g. email, upload to localization platform, etc.).\n4. The translators work on translations.\n5. The project manager forwards translated ARB files to the developer.\n6. The developer updates the Flutter project with new translations.\n\nIn this simplified case of localization, we can already notice some tasks that drain a lot of time and can be a bottleneck. Those are steps 2, 3, and 5. Moreover, these six steps can recur frequently (e.g. a UI update or a new feature), which is far from optimal. And that is even truer for medium and large teams. Just imagine how much time is wasted on file sharing when you have to coordinate a team of 10+ people. Not to mention the problem of outdated ARB files.\n\n## Automated localization\n\nSince you've seen some flaws in the usual way of localization, let's see how we can optimize that.\n\n1. The developer updates the code and the main ARB file, and pushes the changes to GitLab.\n2. GitLab informs Localizely via webhook about the new changes. \n3. Localizely fetches ARB files from GitLab and lets translators work on translations.\n4. The project manager pushes updated ARB files to GitLab via [MR](https://docs.gitlab.com/ee/user/project/merge_requests/).\n5. The developer updates the Flutter project with new translations (merges the MR).\n\nThis way of working enables everyone to do their job more efficiently. Developers can focus on product development, translators on translations, managers on management, and so on. It should also be noted that with this type of workflow, you can easily accelerate the development and delivery of new features, which is in everyone's interest.\n\nTo make this workflow possible, you need to adjust a few things. The necessary settings are:\n\n1. Add a [localizely.yml](https://localizely.com/configuration-file/) config file to the root of your Flutter project. \n2. Set up [GitLab integration](https://localizely.com/gitlab-integration/) on the Localizely platform. \n3. Add a webhook to the GitLab repository.\n\nAnd that’s all. You have automated localization for your Flutter project. Whenever the developer pushes changes to GitLab, the translators will see new string keys in Localizely. Once the translation is done, a single click creates a new MR with the latest translations in GitLab. There is no need for a mediator, waiting, or sending ARB files for every little thing. Now you can have more time for other things, as this tedious work is automated.\n\n## Final thoughts\n\nIn this post, you have seen the most common steps of localization in Flutter projects and how to automate some of them. 
Knowing how important efficiency is today, we should strive to automate repetitive tasks as much as possible. As someone once said, “Lost time is never found again”.\n\n",[894,1286,726],{"slug":3241,"featured":6,"template":678},"how-to-automate-localization-for-flutter-apps","content:en-us:blog:how-to-automate-localization-for-flutter-apps.yml","How To Automate Localization For Flutter Apps","en-us/blog/how-to-automate-localization-for-flutter-apps.yml","en-us/blog/how-to-automate-localization-for-flutter-apps",{"_path":3247,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3248,"content":3253,"config":3258,"_id":3260,"_type":16,"title":3261,"_source":17,"_file":3262,"_stem":3263,"_extension":20},"/en-us/blog/gitops-with-gitlab-secrets-management",{"title":3249,"description":3250,"ogTitle":3249,"ogDescription":3250,"noIndex":6,"ogImage":2478,"ogUrl":3251,"ogSiteName":692,"ogType":693,"canonicalUrls":3251,"schema":3252},"GitOps with GitLab: How to tackle secrets management","In part four of our GitOps series, we learn how to manage secrets with the GitLab Agent for Kubernetes.","https://about.gitlab.com/blog/gitops-with-gitlab-secrets-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: How to tackle secrets management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-12-02\",\n      }",{"title":3249,"description":3250,"authors":3254,"heroImage":2478,"date":3255,"body":3256,"category":14,"tags":3257},[2014],"2021-12-02","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can also view our entire [\"Ultimate guide to GitOps with GitLab\"](/blog/the-ultimate-guide-to-gitops-with-gitlab/) tutorial series._\n\nIn this article, we will use our cluster connection to manage secrets within the cluster.\n\n## Prerequisites\n\nThis article assumes that you have a Kubernetes cluster connected to GitLab using the GitLab Agent for Kubernetes. If you don't have such a cluster, I recommend looking at the linked articles above so you have a similar setup from where we will start today.\n\n## A few words about secrets management\n\nThe Kubernetes `Secret` resource is a rather tricky one! By design, secrets should have limited access and should be encrypted at rest and in transit. Still, by default, Kubernetes does not encrypt secrets at rest, and access to them might not be restricted in your cluster. We will not go into detail about how to secure your cluster with respect to secrets in this article. Instead, we want to focus on getting some secrets configured in your cluster with a GitOps approach.\n\nManaging secrets with GitOps means you store those secrets within your Git repository. Of course, you should never store unencrypted secrets in a repo, and some security people are even reluctant to store encrypted secrets in Git. We will not be that worried, but you should consider whether this is an acceptable risk for you. 
There is an alternative, discussed below, if you prefer not to manage your secrets in Git.\n\nThere are a few benefits of Git-based secrets management:\n\n- you get versioning by default\n- collaboration is supported using merge requests\n- as secrets are in code, you push responsibilities towards the development team\n- the tools used are well-known to developers\n\n## Secrets management with GitLab\n\nWhen it comes to secrets, Kubernetes, and GitLab, there are at least three options to choose from:\n\n- create secrets automatically from environment variables in GitLab CI\n- manage secrets through HashiCorp Vault and GitLab CI\n- manage secrets in Git with a GitOps approach\n\n### Create secrets automatically from environment variables in GitLab CI\n\nThe Auto Deploy template applies every [`K8S_SECRET_` prefixed environment variable](https://docs.gitlab.com/ee/topics/autodevops/customize.html#application-secret-variables) into your cluster as a Kubernetes Secret. Later, your applications can reference these secrets. This approach is the simplest to use, especially if you would like to use [Auto DevOps](/topics/devops/). We will look into it in a future article.\n\nWhile simple to use, with this approach your secrets are stored in the GitLab database instead of Git. That means you lose versioning of the secrets, you need `Maintainer` rights to modify these secrets, and you lose the ability to approve a change of secret in a merge request.\n\n### Manage secrets through HashiCorp Vault and GitLab CI\n\n[GitLab CI/CD integrates with HashiCorp Vault](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/#authenticating-and-reading-secrets-with-hashicorp-vault) to support advanced secrets management use cases. You can combine the `K8S_SECRET_` prefixed use case even with Vault-based secrets, and have the secrets applied automatically. \n\nWith this approach, you get all the benefits of HashiCorp Vault, but there is a question: why do you move secrets from Vault to GitLab just to move them to your cluster, instead of retrieving the secrets directly from within your cluster? We recommend leaving GitLab out of this flow if you don't have a really good reason to provide secret access to GitLab too! Vault has really great Kubernetes support, so retrieving secrets directly should be feasible.\n\n### Manage secrets in Git with a GitOps approach\n\nTo manage secrets in Git, we will need some kind of tooling to take care of the encryption/decryption of the secrets. In this article, I will show you how to set up and use [Bitnami's Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets), but you can try other tools, like [SOPS](https://github.com/mozilla/sops), too. We will look into Bitnami's approach as it targets Kubernetes exclusively, unlike SOPS, which supports other use cases too and might need a bit more setup for Kubernetes.\n\nBitnami's Sealed Secrets is composed of an in-cluster controller and a CLI tool. The cluster component defines a `SealedSecret` custom resource that stores the encrypted secret and related metadata. Once a `SealedSecret` is deployed into the cluster, the controller decrypts it and creates a native Kubernetes `Secret` resource from it. To create a `SealedSecret` resource, the `kubeseal` utility can be used. 
`kubeseal` takes a public key and encrypts a native Kubernetes `Secret` into a `SealedSecret`; it can also help with retrieving the public key from the cluster-side controller.\n\n## Setting up Bitnami's Sealed Secrets\n\nAs the GitLab Agent supports pure Kubernetes manifests to do GitOps, we will need the manifests for Sealed Secrets. Open the [Sealed Secrets releases page](https://github.com/bitnami-labs/sealed-secrets/releases/) and find the most recent release (don't be fooled by the `helm` releases!). At the time of writing this article, the most recent [release is v0.16.0](https://github.com/bitnami-labs/sealed-secrets/releases/tag/v0.16.0). From there, you can download the release YAML; if your cluster supports RBAC, I recommend the basic `controller.yaml` file.\n\n- Save and commit the `controller.yaml` under `kubernetes/sealed-secrets.yaml`.\n\nPush the changes and wait a few seconds for them to get applied. Check that they got applied successfully using: `kubectl get pods -n kube-system -l name=sealed-secrets-controller`\n\n## Retrieving the public key\n\nWhile the user can encrypt a secret directly with `kubeseal`, this approach requires them to have access to the Kube API. Instead of providing access, we can fetch the public key from the Sealed Secrets controller and store it in the Git repo. The public key can be used to encrypt secrets, but is useless for decrypting them.\n\n```bash\nkubeseal --fetch-cert > sealed-secrets.pub.pem\n```\n\n### How to avoid storing unencrypted secrets\n\nI prefer to have an `ignored` directory within my Git repo. The content of this directory is never committed to Git, and I put all sensitive data under this directory.\n\n```bash\nmkdir ignored\ncat \u003C\u003CEOF > ignored/.gitignore\n*\n!.gitignore\nEOF\n```\n\n## Creating sealed secrets\n\nNow, you can create sealed secrets with the following two commands:\n\n```bash\necho \"Very secret\" | kubectl create secret generic my-secret -n gitlab-agent --dry-run=client --type=Opaque --from-file=token=/dev/stdin -o yaml > ignored/my-secret.yaml\nkubeseal --format=yaml --cert=sealed-secrets.pub.pem \u003C ignored/my-secret.yaml > kubernetes/my-secret.yaml\n```\n\nThe first command creates a regular Kubernetes `Secret` resource in the `gitlab-agent` namespace. Setting the namespace is important if you use Sealed Secrets, as every `SealedSecret` is scoped to a specific namespace. You can read more about this in the Sealed Secrets documentation.\n\nThe second command takes a `Secret` resource object and turns it into an encrypted `SealedSecret` resource. 
In my case, the secret file:\n\n```yaml\napiVersion: v1\ndata:\n  token: VmVyeSBzZWNyZXQK\nkind: Secret\nmetadata:\n  creationTimestamp: null\n  name: my-secret\n  namespace: gitlab-agent\ntype: Opaque\n```\n\ngot turned into:\n\n```yaml\napiVersion: bitnami.com/v1alpha1\nkind: SealedSecret\nmetadata:\n  creationTimestamp: null\n  name: my-secret\n  namespace: gitlab-agent\nspec:\n  encryptedData:\n    token: AgC1m/D1UwliKD3C2QSv/g+zBi1qGz1YTLZfqnl5JJ4NydCatKzsp8LZr2stIlkwcS3f2YAo/ZIq1OUhOgSgkuNMwVdqsBx1zq7Z3xpGLMIMe7B3XhQ+ExWwqgrm1dTiTDHaH9eXsZWaNsruKQU0F8oGxgLfO/axEZeGWd4WngZRaed9B43dy2k05B6fZnxmwtUVSpr86MO52fX06/QdbvB8MZTrYb7qFuL14U0IDvdFl4l8sPl2rrXsriKg0fJHIV6XtlCwPpQGozTZTUX8nbvU0yXothBzPbaIUfXseFqaW8i/i0Ai+aKhWQAjPGooVAXGwKsuve16DxZ6GJPp1ymR1cEsBkEPlYKbVCKtH5VuptCYZuTXMM6OEPzjFabaIMIUVkkciHlUMcpKFfPnpf7XbBNqZCAKjt//9L99gc48dJRyO4pCrcpFnv6287d65UGnWjmcUJNQNBhEuh9k4esfEZuBNiYIz3Ouz7Wg5HQoT6v3i3J1X5LluWEcTK1G10T7UN+QrnklH4yUtx35yLp83B5/TGICo0Yq1QnARNbKhL5EXuwAO427XO65zzJ3Lh2ymUfrBY3bHO8NW4ykO7ZNDRdj/fsge1J8k4yaxeimQapDKs4XMhoNnKqUNPQYaiQzNPRoj9JwMvtvOH+WLJqEXHIc8RooWGkdo/SB7zp3q7OuHk6HRJM+AQVP3t0r3A1bVhHonUGlv1ApduM=\n  template:\n    metadata:\n      creationTimestamp: null\n      name: my-secret\n      namespace: gitlab-agent\n    type: Opaque\n```\n\nJust commit the `SealedSecret` and quickly start to watch the event stream using `kubectl get events --all-namespaces --watch` to see when the sealed secret is unsealed and applied as a regular `Secret`.\n\n## Utility scripts\n\nIf you found the `kubeseal` command above to be quite complex, you can wrap it in a script.\n\n- Create `bin/seal-secret.sh` with the following content:\n\n```bash\n#!/bin/sh\n\n# Seal a vanilla Kubernetes Secret into a SealedSecret in the given output directory.\nif [ $# -ne 2 ]\n  then\n    echo \"Usage: $0 ignored/my-secret.yaml output-dir/\"\n    echo \"This script requires two arguments\"\n    echo \"The first argument should be the unsealed secret\"\n    echo \"The second argument should be the directory to output the sealed secret\"\n  exit 1\nfi\n\n\nSECRET_FILE=$(basename \"$1\")\n\nkubeseal --format=yaml --cert=sealed-secrets.pub.pem \u003C \"$1\" > \"$2/SealedSecret.${SECRET_FILE}\"\n\necho \"Created file $2/SealedSecret.${SECRET_FILE}\"\n```\n\nThis script takes a path to a vanilla Kubernetes secret and an output directory, and transforms your `Secret` into a `SealedSecret`.\n\n## Winding it up\n\nIn this article, we have seen how you can install Bitnami's Sealed Secrets into your cluster and set it up for static secrets management. 
Please note the installation method provided here works for any other third-party, off-the-shelf application that can be deployed using Kubernetes manifests only.\n\n## What is next?\n\nIn the next article, we will see how you can access a Kubernetes cluster using GitLab CI/CD and why you might want to do it even if you aim for GitOps.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n",[535,1002,915],{"slug":3259,"featured":6,"template":678},"gitops-with-gitlab-secrets-management","content:en-us:blog:gitops-with-gitlab-secrets-management.yml","Gitops With Gitlab Secrets Management","en-us/blog/gitops-with-gitlab-secrets-management.yml","en-us/blog/gitops-with-gitlab-secrets-management",{"_path":3265,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3266,"content":3271,"config":3276,"_id":3278,"_type":16,"title":3279,"_source":17,"_file":3280,"_stem":3281,"_extension":20},"/en-us/blog/gitops-with-gitlab-connecting-the-cluster",{"title":3267,"description":3268,"ogTitle":3267,"ogDescription":3268,"noIndex":6,"ogImage":2478,"ogUrl":3269,"ogSiteName":692,"ogType":693,"canonicalUrls":3269,"schema":3270},"GitOps with GitLab: Connect with a Kubernetes cluster","In our third article in our GitOps series, learn how to connect a Kubernetes cluster with GitLab for pull and push-based deployments.","https://about.gitlab.com/blog/gitops-with-gitlab-connecting-the-cluster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Connect with a Kubernetes cluster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-11-18\",\n      }",{"title":3267,"description":3268,"authors":3272,"heroImage":2478,"date":3273,"body":3274,"category":14,"tags":3275},[2014],"2021-11-18","\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\n## GitOps with GitLab: connecting a Kubernetes cluster\n\nThis [GitOps](/topics/gitops/) with GitLab post shows how to connect a Kubernetes cluster with GitLab for pull- and push-based deployments and easy security integrations. In order to do so, the following elements are required:\n\n- A Kubernetes cluster that you can access, in which you can create new resources, including `Role` and `RoleBinding`. \n- You will need `kubectl` and your local environment configured to access the aforementioned cluster.\n- (Optional, recommended) Terraform and a Terraform project set up as shown [in the previous article](/blog/gitops-with-gitlab-infrastructure-provisioning/) to retrieve an agent registration token from GitLab.\n- (Optional, recommended) `kpt` and `kustomize` to install the Agent into your cluster.\n- (Optional, quickstart) If you prefer a less \"gitopsy\" approach, you will need `docker` (Docker Desktop is not needed). 
This is simpler to follow, but gives you less control.\n\n## How to connect a cluster to GitLab\n\nThere are many ways to connect a cluster to GitLab:\n\n- you can set up a `$KUBECONTEXT` variable manually, manage all the related connections, and use GitLab CI/CD to push changes into your cluster\n- you can use a 3rd party tool, like [ArgoCD](https://argo-cd.readthedocs.io/en/stable/) or [Flux](https://fluxcd.io), to get pull-based deployments\n- you can use the legacy, certificate-based cluster integration within GitLab, in which case GitLab will manage the `$KUBECONTEXT` for you and you get easy metrics, logging, and monitoring integrations\n- or you can use the recommended approach, the [GitLab Agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/), to have pull- and push-based deployment support, network security policy integrations, and the possibility of metrics and monitoring too\n\nWe are going to focus on the Agent-based setup here, as we believe that it serves and will serve our users best, hopefully you included.\n\n## How does the Agent work\n\nThe Agent has a component that needs to be installed into your cluster. We call this component `agentk`. Once `agentk` is installed, it reaches out to GitLab and authenticates itself with an access token. So, the first step is to get a token from GitLab. We call this step \"the Agent registration.\" If the authentication succeeds, `agentk` sets up a bidirectional gRPC channel between itself and GitLab. The emphasis here is on \"bidirectional.\" This enables requests and messages to be sent by either side and provides the possibility of much deeper integrations than the other approaches, while still being a nice citizen within your cluster.\n\nOnce the connection is established, the Agent retrieves its own configuration from GitLab. This configuration is a `config.yaml` file under a repository, and you actually register the location of this configuration file when you register a new Agent. The configuration describes the various capabilities enabled for an Agent.\n\nOn the GitLab side, `agentk` communicates with what we call the Kubernetes Agent Server, or `kas`. As most users do not have to deal with setting up `kas`, I won't write about it here. You need to be a GitLab administrator [to set up and manage `kas`](https://docs.gitlab.com/ee/administration/clusters/kas.html). If you are on gitlab.com, `kas` is available to you at `kas.gitlab.com`, thanks to our amazing SRE team.\n\nSo the steps we are going to take in this article are the following:\n\n1. Create a configuration file for the Agent\n1. Register the Agent and retrieve its authentication token\n1. Install `agentk` into the cluster together with the token\n\nFinally, we will set up an example pull-based deployment just to test that everything worked as expected. Let's get started!\n\n## How many Agents do you need for a larger setup\n\nWe recommend having at least one separate Agent registered for each of your environments. If you have multiple clusters, have at least one agent registered with each cluster. While it is possible to have many `agentk` deployments with the same authentication token and thus configuration file, this is not supported and might lead to synchronization problems!\n\nThe different agent configurations can use the same Kubernetes manifests for deployments, so maintaining a multi-region setup where all the clusters should be identical does not require much effort; see the sketch below. 
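\n\nAs a sketch (the project path is a placeholder), two agents' configuration files can point at the same manifest project, so identical clusters stay in sync from one source of truth:\n\n```yaml\n# .gitlab/agents/production-us/config.yaml and .gitlab/agents/production-eu/config.yaml\n# can both contain the same gitops section:\ngitops:\n  manifest_projects:\n  - id: path/to/your/deployments\n    paths:\n    - glob: 'kubernetes/**/*.yaml'\n```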
\n\nWe designed `agentk` to be very lightweight, so you should not worry about deploying multiple instances of it into a cluster. \n\nWe know users who use separate `agentk` instances per squad, for example. In these situations, the squad owns some namespaces in the cluster, and each Agent can access only the namespaces available to its squad. This way, `agentk` is not just a good citizen in your cluster, but is like a team member in your squad.\n\n## Create a configuration file for the Agent\n\nNote:\nYou can use either the Terraform project from the previous step or start with a new project. I will assume that we build on top of the Terraform setup from the previous article, linked above, which will come in handy when we want to register the Agent using Terraform. I won't go through setting up all the environment variables here for a local Terraform run.\n\nDecide on your agent name, and create an empty file in your project under `.gitlab/agents/\u003Cyour agent name>/config.yaml`. Nota bene: the extension is `yaml`, not `yml`, and your agent name must follow the [DNS label standard from RFC 1123](https://docs.gitlab.com/ee/user/clusters/agent/install/#create-an-agent-configuration-file). I'll call my agent `demo-agent`, so the file is under `.gitlab/agents/demo-agent/config.yaml`.\n\n## Register the Agent\n\nThe next step is to register the Agent with GitLab. You can do this either through the GitLab UI or using Terraform. I will show you both approaches.\n\n### Registering through the UI\n\nOnce the configuration file is in place, visit `Infrastructure/Kubernetes` and add a new cluster using the Agent. A dialog will pop up where you can select your agent.\n\nOnce you hit \"next,\" you will see the registration token and a `docker` command for easy installation. The `docker` command includes the token too, and you can run it to quickly set up an `agentk` inside of your cluster. (You might need to create a namespace first!) Feel free to run the command for a quickstart, or follow the tutorial for a truly code-based approach.\n\n### Registering through code\n\nWe will use Terraform to register the Agent through code. Let's create the following files:\n\n- Under `terraform/gitlab-agent/main.tf`\n\n```hcl\nterraform {\n  backend \"http\" {\n  }\n  required_version = \">= 0.13\"\n  required_providers {\n    gitlab = {\n      source = \"gitlabhq/gitlab\"\n      version = \"~>3.6.0\"\n    }\n  }\n}\n\nprovider \"gitlab\" {\n    token = var.gitlab_password\n}\n\nmodule \"gitlab_kubernetes_agent_registration\" {\n  source = \"gitlab.com/gitlab-org/kubernetes-agent-terraform-register-agent/local\"\n  version = \"0.0.2\"\n\n  gitlab_project_id = var.gitlab_project_id\n  gitlab_username = var.gitlab_username\n  gitlab_password = var.gitlab_password\n  gitlab_graphql_api_url = var.gitlab_graphql_api_url\n  agent_name = var.agent_name\n  token_name = var.token_name\n  token_description = var.token_description\n}\n```\n\nAs you can see, we will use a module here. The module is hosted using the Terraform registry provided by GitLab. You can check out [the module source code here](https://gitlab.com/gitlab-org/configure/examples/kubernetes-agent-terraform-register-agent). You might have guessed correctly that under the hood the module uses the GitLab GraphQL API to register the agent and retrieve a token. 
We will need to set up variables for it to work.\n\n- Create `terraform/gitlab-agent/variables.tf`\n\n```hcl\nvariable \"gitlab_project_id\" {\n  type = string\n}\n\nvariable \"gitlab_username\" {\n  type = string\n}\n\nvariable \"gitlab_password\" {\n  type = string\n}\n\nvariable \"agent_name\" {\n  type = string\n}\n\nvariable \"token_name\" {\n  type    = string\n  default = \"kas-token\"\n}\n\nvariable \"token_description\" {\n  type    = string\n  default = \"Token for KAS Agent Authentication\"\n}\n\nvariable \"gitlab_graphql_api_url\" {\n  type    = string\n  default = \"https://gitlab.com/api/graphql\"\n}\n```\n\n- Create `terraform/gitlab-agent/outputs.tf`\n\n```hcl\noutput \"agent_id\" {\n  value     = module.gitlab_kubernetes_agent_registration.agent_id\n}\n\noutput \"token_secret\" {\n  value     = module.gitlab_kubernetes_agent_registration.token_secret\n  sensitive = true\n}\n```\n\nOnce the registration is over, you'll be able to retrieve the agent ID and the token using these Terraform outputs.\n\n### Run the Terraform project\n\nOnce the above code is in place, we need to run it to actually register the Agent. Here, I am going to extend the setup from the previous article.\n\n#### Running locally\n\n- Create `terraform/gitlab-agent/.envrc` as you did for the network project.\n\n```\nexport TF_STATE_NAME=${PWD##*terraform/}\nsource_env ../../.main.env\n```\n\nNow run Terraform:\n\n```bash\nterraform init\nterraform plan\nterraform apply\n```\n\n#### Running from the CI/CD pipeline\n\nExtend the `.gitlab-ci.yml` file with the following three jobs:\n\n```yaml\ngitlab-agent:init:\n  extends: .terraform:init\n  stage: init\n  variables:\n    TF_ROOT: terraform/gitlab-agent\n    TF_STATE_NAME: gitlab-agent\n  only:\n    changes:\n      - \"terraform/gitlab-agent/*\"\n\ngitlab-agent:review:\n  extends: .terraform:build\n  stage: build\n  variables:\n    TF_ROOT: terraform/gitlab-agent\n    TF_STATE_NAME: gitlab-agent\n  resource_group: tf:gitlab-agent\n  only:\n    changes:\n      - \"terraform/gitlab-agent/*\"\n\ngitlab-agent:deploy:\n  extends: .terraform:deploy\n  stage: deploy\n  variables:\n    TF_ROOT: terraform/gitlab-agent\n    TF_STATE_NAME: gitlab-agent\n  resource_group: tf:gitlab-agent\n  environment:\n    name: demo-agent\n  when: manual\n  only:\n    changes:\n      - \"terraform/gitlab-agent/*\"\n    variables:\n      - $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\nAs you can see, these are the same jobs that we saw already; they are just parameterized for the `gitlab-agent` Terraform project.\n\nNota bene, even if you use GitLab to register the Agent, you will need your command line to install `agentk` for the first time! As a result, you cannot avoid a local setup, as you will need to run at least `terraform output` to retrieve the token!\n\n## Install `agentk`\n\nIn this tutorial, we are going to follow [the advanced installation instructions](https://docs.gitlab.com/ee/user/clusters/agent/install/index.html#advanced-installation) from the GitLab documentation. This approach is highly customizable using `kustomize` and `kpt`.\n\nFirst, let's retrieve the basic Kubernetes resource definitions using `kpt`:\n\n- Create a directory `packages` using `mkdir packages`\n- Run `kpt pkg get https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent.git/build/deployment/gitlab-agent packages/gitlab-agent`\n\nThis will retrieve the most recent version of the `agentk` installation resources. 
You can request a tagged version with the well-known `@` syntax, for example by running `kpt pkg get https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent.git/build/deployment/gitlab-agent@v14.4.0 packages/gitlab-agent`. You can see [all the available versions here](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/tags).\n\n### Why `kpt`?\n\nWe chose `kpt` because it gives you sane upstream package management. With `kpt` you will be able to regularly update your packages using something like `kpt pkg update packages/gitlab-agent@\u003Cnew version> --strategy=resource-merge`. It basically allows you to modify your package locally, and it will try to merge upstream changes into it. Read the `kpt pkg update -h` output for more information and alternative merge strategies.\n\n### Continue with the installation\n\nThe `kpt` packages you retrieved are actually a set of `kustomize` overlays. The `base` defines only the `agentk` deployment and namespace; the `cluster` defines some default RBAC around the deployment. Feel free to add your own overlays and use those. We will extend this package with custom overlays in part 6 of the series.\n\nTo configure the package, see the available configuration options using:\n\n```bash\nkustomize cfg list-setters packages/gitlab-agent\n        NAME                 VALUE               SET BY                  DESCRIPTION              COUNT   REQUIRED   IS SET  \n  agent-version       stable                 package-default   Image tag for agentk container     1       No         No      \n  kas-address         wss://kas.gitlab.com   package-default   kas address. Use                   1       No         No      \n                                                               grpc://host.docker.internal:8150                              \n                                                               if connecting from within Docker                              \n                                                               e.g. from kind.                                               \n  name-prefix                                                  Prefix for resource names          1       No         No      \n  namespace           gitlab-agent           package-default   Namespace to install GitLab        2       No         No      \n                                                               Kubernetes Agent into                                         \n  prometheus-scrape   true                   package-default   Enable or disable Prometheus       1       No         No      \n                                                               scraping of agentk metrics.                              \n```\n\nThe package default will be different if you used a tagged version for getting the package. Let's pin the version, as using `stable` is not recommended.\n\n```bash\nkustomize cfg set packages/gitlab-agent agent-version v14.4.1\nset 1 field(s) of setter \"agent-version\" to value \"v14.4.1\"\n```\n\nFeel free to adjust the other configuration options too, or add your own overlays if needed.\n\n### Which agent version to use\n\nIf possible, the version of `agentk` should match the major and minor version of your GitLab instance. 
Alternatively, you can find the version of your GitLab instance under the Help menu in the UI.\n\nIf there is no agent version matching your major and minor version, then pick the agent with the highest major and minor version below that of your GitLab instance.\n\n### Continue with the installation\n\nWarning:\nBefore the next step, a word of caution: never, ever commit unencrypted secrets into git, and the agent registration token is a secret!\n\nLet's retrieve the agent registration token from our Terraform project. Run the following command in the `terraform/gitlab-agent` directory:\n\n```bash\nterraform output -raw token_secret > ../../packages/gitlab-agent/base/secrets/agent.token\n```\n\nThis writes the registration token to a file on your local computer. Do not commit these changes to git!\n\nAt this point, we are ready to deploy `agentk` into the cluster, so run:\n\n```bash\nkustomize build packages/gitlab-agent/cluster | kubectl apply -f -\n```\n\nLet's get rid of the secret:\n\n```bash\necho \"Invalid token\" > packages/gitlab-agent/base/secrets/agent.token\n```\n\nYou are good to commit your changes to `git` now!\n\n## Testing the setup\n\nWe have installed the Agent; now what? How can we start using it? In the next article we will see in detail how to deploy a more serious application into the cluster. Still, to check that cluster synchronization actually works, let's deploy a `ConfigMap`.\n\n- Create `kubernetes/test_config.yaml` with the following content:\n\n```yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: gitlab-gitops\n  namespace: default\ndata:\n  key: It works!\n```\n\n- Modify your Agent configuration file under `.gitlab/agents/demo-agent/config.yaml`, and add the following to it:\n\n```yaml\ngitops:\n  # Manifest projects are watched by the agent. Whenever a project changes,\n  # GitLab deploys the changes using the agent.\n  manifest_projects:\n  - id: path/to/your/project\n    default_namespace: gitlab-agent\n    # Paths inside of the repository to scan for manifest files.\n    # Directories with names starting with a dot are ignored.\n    paths:\n    - glob: 'kubernetes/test_config.yaml'\n    #- glob: 'kubernetes/**/*.yaml'\n```\n\nChange the `- id: path/to/your/project` line above to point to your project's path!\n\nThe above configuration tells the Agent to keep the `kubernetes/test_config.yaml` file in sync with the cluster. I've left a commented line at the end to show how you could use wildcards. This will come in handy in future steps of this article. The `default_namespace` is used if no namespace is provided in the Kubernetes manifests. There are many other configuration options as well, even for the `gitops` use case. You can read more about these in [the configuration file reference documentation](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html).\n\nOnce you commit the above changes, GitLab notifies `agentk` about the changed files. First, `agentk` updates its configuration; second, it retrieves the `ConfigMap`.\n\nWait a few seconds, and run `kubectl describe configmap gitlab-gitops` to check that the changes got applied to your cluster. 
You should see something similar to this:\n\n```\nName:         gitlab-gitops\nNamespace:    default\nLabels:       \u003Cnone>\nAnnotations:  config.k8s.io/owning-inventory: 502-28431043\n              k8s-agent.gitlab.com/managed-object: managed\n\nData\n====\nkey:\n----\nIt works!\n```\n",[1002,232,915],{"slug":3277,"featured":6,"template":678},"gitops-with-gitlab-connecting-the-cluster","content:en-us:blog:gitops-with-gitlab-connecting-the-cluster.yml","Gitops With Gitlab Connecting The Cluster","en-us/blog/gitops-with-gitlab-connecting-the-cluster.yml","en-us/blog/gitops-with-gitlab-connecting-the-cluster",{"_path":3283,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3284,"content":3289,"config":3295,"_id":3297,"_type":16,"title":3298,"_source":17,"_file":3299,"_stem":3300,"_extension":20},"/en-us/blog/vscode-workflow-new-features",{"title":3285,"description":3286,"ogTitle":3285,"ogDescription":3286,"noIndex":6,"ogImage":2284,"ogUrl":3287,"ogSiteName":692,"ogType":693,"canonicalUrls":3287,"schema":3288},"Four new tools for your Visual Studio Code and GitLab tool belt","Learn about new features that can help you review MRs and interact with GitLab","https://about.gitlab.com/blog/vscode-workflow-new-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Four new tools for your Visual Studio Code and GitLab tool belt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2021-11-17\",\n      }",{"title":3285,"description":3286,"authors":3290,"heroImage":2284,"date":3292,"body":3293,"category":14,"tags":3294},[3291],"Tomas Vik","2021-11-17","\n\nIn our [previous post](/blog/mr-reviews-with-vs-code/), we talked about merge request (MR) reviews. We explained how the GitLab Workflow extension helps you review MRs without leaving VS Code. Since releasing and polishing MR reviews, we've been working on improvements to the extension. In this post, we will show you how the latest features fit into your workflow.\n\n### Do you have a lot to say? Use a snippet patch!\n\nOn GitLab's web UI there's the \"suggestions\" feature. It's handy for suggesting small changes in the MR review. The VS Code platform doesn't let us recreate the same experience, but the extension offers an alternative: snippet patches.\n\nSnippet patches are code changes (git patches) of arbitrary size shared as GitLab snippets. Because they don't have a size limit, they are perfect for suggesting changes to multiple files during the MR review.\n\nThe extension has two commands, `Create snippet patch` and `Apply snippet patch`. These commands use `git diff` and `git apply`, respectively, which means people can still apply the snippet patch even if they don't use the GitLab Workflow extension.\n\nIf a suggestion in the comment is a hammer, then a snippet patch is a pneumatic tamping machine. Next time you review an MR and see a lot of room for improvement, remember the adage: \"A patch is worth a thousand words\".\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/QQxpLoKJULQ\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n### What's going on with my pipeline? - Improved CI status display\n\nThe extension has always shown the latest CI pipeline status in both the status bar and the sidebar. 
However, if you tried to gauge your pipeline status, you probably ran into one or more surprises. The status was hard to understand. Sometimes it related to a different branch, or it was out of date.\n\nWe've made the pipeline status much more reliable and readable. For starters, you can now see individual jobs and their status in the sidebar. Click on any job, and the extension opens a browser window with the GitLab job page.\n\nWe also improved the consistency of showing the pipeline status. The status bar and sidebar are now in sync and always show the pipeline for the current branch.\n\nWe are excited about the cleaner code. It makes it easier for anyone to contribute functionality. If you're interested in giving it a shot, we recommend starting with the [Download artifacts from the latest pipeline](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/106) feature request.\n\n![VS Code status bar](https://about.gitlab.com/images/blogimages/2021-11-05-vscode-workflow-new-features/ci-pipeline-panel.png){: .shadow.medium.center}\nVS Code CI Pipeline status overview from GitLab extension.\n{: .note .text-center}\n\n### Make the MR your own - Working with checked out code\n\nTwo recent improvements play well together to make your review more interactive. They help you spend less time on actions that don't directly relate to reviewing code. These improvements let you check out the MR branch and open a local file during a review.\n\n#### Check out the MR branch\n\nYou can check out any MR locally, as long as it is not coming from a forked project. Right-click the MR in the side tree and select \"Checkout MR Branch\". After the command finishes, you'll have the MR branch checked out in your project. Now you can review and run the code.\n\n\u003Cfigure class=\"video_container\">\n  \u003Cvideo src=\"https://gitlab.com/gitlab-org/gitlab-vscode-extension/uploads/db804234ed4d338dea31a27778dba72e/checkout-mr-branch.mp4\" controls=\"true\" data-setup=\"{}\" data-title=\"checkout-mr-branch\" preload=\"metadata\" width=\"560\">\u003C/video>\n\u003C/figure>\n{: .shadow.medium.center}\n\n#### Open a local file during a review\n\nWhen you look at a changed file in an MR, you can click on a small \"file\" icon in the top-right corner. The extension will open the same file in your local repository.\n\nIf your local branch is different from the MR branch, the local file might not be the same as the MR file.\n\nOpening the local file is useful when you want to explore the surroundings of the file quickly. VS Code automatically focuses the file in the file tree, which lets you see all the neighbouring files.\n\n\u003Cfigure class=\"video_container\">\n  \u003Cvideo src=\"https://gitlab.com/gitlab-org/gitlab-vscode-extension/uploads/de2839b1ceb1be6c33cd80d7fe72bc6d/open-mr-file.mp4\" controls=\"true\" data-setup=\"{}\" data-title=\"open-mr-file\" preload=\"metadata\" width=\"560\">\u003C/video>\n\u003C/figure>\n{: .shadow.medium.center}\n\n### Commitment problems? Browse repositories without checking them out\n\nAt GitLab, we've got some large repositories. The largest, which all GitLabbers use daily, is [www-gitlab-com](https://gitlab.com/gitlab-com/www-gitlab-com), the website you see when you visit `about.gitlab.com`. 
This 6 GB colossus takes several minutes to check out.\n\nExploring this repository is a perfect use case for our latest feature: Remote Repositories, [contributed by Ethan Reesor](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/321), a community member.\n\nRun the `GitLab: Open Remote Repository` command, pick which project and branch you want to use, and _voilà_. The extension opens the repository in your local workspace, but it doesn't store data on your local machine.\n\nRemote repositories are useful when you want to browse a repository for a reference but don't plan to change the code.\n\nThis is the first iteration, and it's got some limitations - you can't use full-text search or fuzzy file navigation, and the files are read-only. It's useful nonetheless.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/p4GTVx_Nd2s\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n### Thank you, community!\n\nMost of the features introduced in this post were either implemented or suggested by a community member. Ahmed Mohamadeen [suggested](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/390) opening a local file during MR review, Musisimaru [created the initial implementation](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/203) of checking out the MR branch, and Ethan Reesor [implemented](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/321) remote repositories.\n\n\nIf you'd like to shape the future of the GitLab Workflow VS Code extension, you can create issues in [our issue tracker](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues), or look for [issues where we accept MRs](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues?label_name%5B%5D=Accepting+merge+requests). 
Our [CONTRIBUTING](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/main/CONTRIBUTING.md) guide is an excellent place to start.\n\nCover image by [Ljubica Petkovic](https://ljubicapetkovic.com), licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)\n{: .note}\n",[232,726,894],{"slug":3296,"featured":6,"template":678},"vscode-workflow-new-features","content:en-us:blog:vscode-workflow-new-features.yml","Vscode Workflow New Features","en-us/blog/vscode-workflow-new-features.yml","en-us/blog/vscode-workflow-new-features",{"_path":3302,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3303,"content":3309,"config":3314,"_id":3316,"_type":16,"title":3317,"_source":17,"_file":3318,"_stem":3319,"_extension":20},"/en-us/blog/gko-on-ocp",{"title":3304,"description":3305,"ogTitle":3304,"ogDescription":3305,"noIndex":6,"ogImage":3306,"ogUrl":3307,"ogSiteName":692,"ogType":693,"canonicalUrls":3307,"schema":3308},"How to install and use the GitLab Kubernetes Operator","Follow these step-by-step instructions to set up the GitLab Kubernetes Operator on a Kubernetes cluster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682191/Blog/Hero%20Images/GKO-Thumbnail.png","https://about.gitlab.com/blog/gko-on-ocp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to install and use the GitLab Kubernetes Operator\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-11-16\",\n      }",{"title":3304,"description":3305,"authors":3310,"heroImage":3306,"date":3311,"body":3312,"category":14,"tags":3313},[1101],"2021-11-16","\n\nThe GitLab Kubernetes Operator was released on October 12, 2021.\n\n## What is the GitLab Kubernetes Operator?\n\nThe GitLab Operator allows you to install and run an instance of GitLab in a vanilla Kubernetes or OpenShift cluster. Kubernetes operators increase the reliability and availability of your applications by automating Day 2 operations such as upgrading components, management of data integrity, application reconfiguration, automatic recovery from a failure, and autoscaling.\n\n## Installing the GitLab Kubernetes Operator on an OpenShift Container Platform cluster\n\nIn this short post, we show you how to install and run the GitLab Operator to create a GitLab instance on an OpenShift Container Platform cluster, which we have already preinstalled:\n\n![OCP console](https://about.gitlab.com/images/blogimages/gko-on-ocp/0-ocp-console.png){: .shadow.medium.center.wrap-text}\nThe OpenShift Container Platform console\n{: .note.text-center}\n\nInspecting the running pods of the OpenShift cluster, we see that Prometheus is already being used as the metrics server, which is a prerequisite for the installation of the GitLab Operator:\n\n![Prometheus up and running](https://about.gitlab.com/images/blogimages/gko-on-ocp/1-prometheus-up.png){: .shadow.medium.center.wrap-text}\nPrometheus up and running on cluster\n{: .note.text-center}\n\nAlso, we verify that the gitlab-system namespace does not yet exist:\n\n![gitlab namespace not present](https://about.gitlab.com/images/blogimages/gko-on-ocp/2-no-gitlab-sys-namespace.png){: .shadow.medium.center.wrap-text}\ngitlab-system namespace non-existent\n{: .note.text-center}\n\nAnother prerequisite is cert-manager, which automates the management and issuance of TLS certificates. 
Let’s use the OpenShift OperatorHub to install and instantiate cert-manager. We first verify that one is not running. Then we head to the OperatorHub and install the cert-manager Operator:\n\n![cert-manager in OperatorHub](https://about.gitlab.com/images/blogimages/gko-on-ocp/3-cert-mgr-in-operatorhub.png){: .shadow.medium.center.wrap-text}\nInstalling cert-manager using its operator in OperatorHub\n{: .note.text-center}\n\n**NOTE:** Once the GitLab Kubernetes Operator is certified with OpenShift, it will have its own tile in the OperatorHub.\n{: .alert .alert-info}\n\nThen we create an instance of cert-manager by using its newly installed operator:\n\n![cert-manager instance creation](https://about.gitlab.com/images/blogimages/gko-on-ocp/4-create-instance-cert-mgr.png){: .shadow.medium.center.wrap-text}\nCreating an instance of cert-manager using its operator\n{: .note.text-center}\n\nIn preparation for the GitLab Operator installation, we create the namespace gitlab-system, under which all of the GitLab resources will live:\n\n![gitlab-system namespace creation](https://about.gitlab.com/images/blogimages/gko-on-ocp/5-create-gitlab-sys-namespace.png){: .shadow.medium.center.wrap-text}\nCreating the gitlab-system namespace\n{: .note.text-center}\n\nTo install the GitLab Operator, we define two environment variables: one to set the version of the GitLab Operator we want to use, and the other to set the platform we are targeting. In this case, it is OpenShift. We then apply the GitLab Operator Custom Resource Definition (CRD) to the cluster, which creates the operator, by entering the following command:\n\n```\nexport GL_OPERATOR_VERSION=\"0.1.0\"\nexport PLATFORM=\"openshift\"\nkubectl apply -f https://gitlab.com/api/v4/projects/18899486/packages/generic/gitlab-operator/${GL_OPERATOR_VERSION}/gitlab-operator-${PLATFORM}-${GL_OPERATOR_VERSION}.yaml\n```\n\nAnd here is an example screenshot of what the output of this command looks like:\n\n![application of the CRD to the cluster](https://about.gitlab.com/images/blogimages/gko-on-ocp/6-applying-the-crd.png){: .shadow.medium.center.wrap-text}\nApplying the GitLab Kubernetes Operator to the OpenShift cluster\n{: .note.text-center}\n\nAs we watch the pods in the gitlab-system namespace, we see the creation of two pods for the gitlab-controller-manager:\n\n![operator pods](https://about.gitlab.com/images/blogimages/gko-on-ocp/7-watching-operator-pods-creation.png){: .shadow.medium.center.wrap-text}\nGitLab Kubernetes Operator pods being created on the OpenShift cluster\n{: .note.text-center}\n\nThe GitLab Kubernetes Operator is now installed on the OpenShift Container Platform cluster. Next, we need to use this newly installed operator to create an instance of GitLab.\n\n## Creating a GitLab instance on the cluster using the GitLab Kubernetes Operator\n\nTo create an instance of GitLab, we create a Custom Resource file called mygitlab.yaml to provide information, such as domain name and certmanager issuer email, for the GitLab Operator to use during the creation of the GitLab instance. 
Here is a parameterized example of the contents for this file:\n\n```\napiVersion: apps.gitlab.com/v1beta1\nkind: GitLab\nmetadata:\n  name: gitlab\nspec:\n  chart:\n    version: \"[REPLACE WITH THE CHART VERSION]\"\n    values:\n      global:\n        hosts:\n          domain: [REPLACE WITH YOUR DOMAIN NAME]\n        ingress:\n          configureCertmanager: true\n      certmanager-issuer:\n        email: [REPLACE WITH YOUR EMAIL]\n```\n\nAnd here is an example screenshot of what this file would look like with actual values for the parameters:\n\n![creating-gitlab-yaml-file](https://about.gitlab.com/images/blogimages/gko-on-ocp/8-creating-mygitlab-yaml.png){: .shadow.small.center.wrap-text}\nCreating mygitlab.yaml, the custom resource file\n{: .note.text-center}\n\nWe then apply the Custom Resource to the cluster. This action will kickstart the creation of all the pods needed for the instantiation of a GitLab instance on the cluster:\n\n![applying the custom resource to the cluster](https://about.gitlab.com/images/blogimages/gko-on-ocp/9-applying-the-cr.png){: .shadow.medium.center.wrap-text}\nApplying the custom resource file to the cluster\n{: .note.text-center}\n\nAfter a few minutes, when the GitLab instance is up and running, we obtain its external IP address from the nginx ingress controller installed by the GitLab Operator by entering the following command:\n\n> kubectl -n gitlab-system get services -o wide gitlab-nginx-ingress-controller\n\nHere's an example screenshot of its output:\n\n![getting the external ip](https://about.gitlab.com/images/blogimages/gko-on-ocp/10-get-external-ip.png){: .shadow.medium.center.wrap-text}\nObtaining the external IP address for our newly created GitLab instance\n{: .note.text-center}\n\nWe use this IP address to create DNS A records to map the DNS names of three of the GitLab instance subsystems (minio, registry, and gitlab) to it. Here is a snapshot for the gitlab one (you need to do the same for the minio and registry subsystems):\n\n![creating dns record](https://about.gitlab.com/images/blogimages/gko-on-ocp/11-creating-dns-record.png){: .shadow.medium.center.wrap-text}\nCreating DNS A record for the gitlab subsystem\n{: .note.text-center}\n\n**NOTE:** I own the domain ocpgitlab.com. You would use a domain that you own.\n{: .alert .alert-info}\n\n## Logging in to the newly created instance running on the OpenShift Container Platform cluster\n\nBefore logging in to our newly created GitLab instance running on OpenShift Container Platform, we need to obtain the initial root password, which is stored as a secret under the gitlab-system namespace. You can obtain it by entering the following command:\n\n> kubectl -n gitlab-system get secret gitlab-gitlab-initial-root-password -ojsonpath='{.data.password}' | base64 --decode ; echo\n\nNow we can point our browser to our newly created GitLab instance on OpenShift and log in as root:\n\n![logging in to GitLab](https://about.gitlab.com/images/blogimages/gko-on-ocp/13-log-in-to-gitlab.png){: .shadow.medium.center.wrap-text}\nLogging in to the newly created GitLab instance running on the OpenShift Container Platform cluster\n{: .note.text-center}\n\nThat’s it!\n\n## Conclusion\n\nWe have shown you how to install and run the GitLab Operator to create a GitLab instance on an OpenShift Container Platform cluster. 
View [this demo](https://youtu.be/sEBnuhzYD2I) to see how this feature works.\n\n## Read more on Kubernetes\n\n- [Threat modeling the Kubernetes Agent: from MVC to continuous improvement](/blog/threat-modeling-kubernetes-agent/)\n\n- [How to deploy the GitLab Agent for Kubernetes with limited permissions](/blog/setting-up-the-k-agent/)\n\n- [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n\n- [Understand Kubernetes terminology from namespaces to pods](/blog/kubernetes-terminology/)\n\n- [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n\n",[1002,749,232],{"slug":3315,"featured":6,"template":678},"gko-on-ocp","content:en-us:blog:gko-on-ocp.yml","Gko On Ocp","en-us/blog/gko-on-ocp.yml","en-us/blog/gko-on-ocp",{"_path":3321,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3322,"content":3328,"config":3333,"_id":3335,"_type":16,"title":3336,"_source":17,"_file":3337,"_stem":3338,"_extension":20},"/en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"title":3323,"description":3324,"ogTitle":3323,"ogDescription":3324,"noIndex":6,"ogImage":3325,"ogUrl":3326,"ogSiteName":692,"ogType":693,"canonicalUrls":3326,"schema":3327},"How Comet can streamline machine learning on The GitLab DevOps Platform","Here's a step-by-step look at how to bring ML into software development using Comet on GitLab's DevOps Platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669991/Blog/Hero%20Images/ways-to-encourage-collaboration.jpg","https://about.gitlab.com/blog/machine-learning-on-the-gitlab-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Comet can streamline machine learning on The GitLab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2021-11-08\",\n      }",{"title":3323,"description":3324,"authors":3329,"heroImage":3325,"date":3330,"body":3331,"category":14,"tags":3332},[2754],"2021-11-08","\n\nBuilding machine learning-powered applications comes with numerous challenges. When we talk about these challenges, there is a tendency to overly focus on problems related to the quality of a model’s predictions—things like data drift, changes in model architectures, or inference latency. \n\nWhile these are all problems worthy of deep consideration, an often overlooked challenge in [ML development](/topics/devops/the-role-of-ai-in-devops/) is the process of integrating a model into an existing software application.  \n\nIf you’re tasked with adding an ML feature to a product, you will almost certainly run into an existing codebase that must play nicely with your model. This is, to put it mildly, not an easy task. \n\nML is a highly iterative discipline. Teams often make many changes to their codebase and pipelines in the process of developing a model. Coupling an ML codebase to an application’s dependencies, unit tests, and CI/CD pipelines will significantly reduce the velocity with which ML teams can deliver on a solution, since each change would require running these downstream dependencies before a merge can be approved.  \n\nIn this post, we’re going to demonstrate how you can use [Comet](https://www.comet.ml/site/) with [GitLab’s DevOps platform](/solutions/devops-platform/) to streamline the workflow for your ML and software engineering teams, allowing them to collaborate without getting in each other's way.      
\n\n## The challenge for ML teams working with application teams\n\nLet’s say your team is working on improving a feature engineering pipeline. You will likely have to test many combinations of features with some baseline model for the task to see which combinations make an impact on model performance.\n\nIt is hard to know beforehand which features might be significant, so having to run multiple experiments is inevitable. If your ML code is a part of your application codebase, this would mean having to run your application’s CI/CD pipeline for every feature combination you might be trying.\n\nThis will certainly frustrate your Engineering and DevOps teams, since you would be unnecessarily tying up system resources, given that software engineering teams do not need to run their pipelines with the same frequency as ML teams do.\n\nThe other issue is that despite having to run numerous experiments, only a single set of outputs from these experiments will make it to your production application. Therefore, the rest of the assets produced through these experiments are not relevant to your application code.\n\nKeeping these two codebases separated will make life a lot easier for everyone – but it also introduces the problem of syncing the latest model between two codebases.\n\n## Use The GitLab DevOps Platform and Comet for your model development process\n\nWith The GitLab DevOps platform and Comet, we can keep the workflows between ML and engineering teams separated, while enabling cross-team collaboration by preserving the visibility and auditability of the entire model development process across teams.\n\nWe will use two separate projects to demonstrate this process. One project will contain our application code for a handwritten digit recognizer, while the other will contain all the code relevant to training and evaluating our model.\n\nWe will adopt a process where discussions, code reviews, and model performance metrics get automatically published and tracked within The GitLab DevOps Platform, increasing the velocity and opportunity for collaboration between data scientists and software engineers for machine learning workflows.\n\n## Project setup\n\nOur setup consists of two projects: [comet-model-trainer](https://gitlab.com/tech-marketing/devops-platform/comet-model-trainer) and [ml-ui](https://gitlab.com/tech-marketing/devops-platform/canara-review-apps-testing).\n\n![The comet-model-trainer project](https://about.gitlab.com/images/blogimages/cometmodeltrainer.png){: .shadow}\n\nThe **comet-model-trainer** repository contains scripts to train and evaluate a model on the MNIST dataset. We have set up The GitLab DevOps Platform in a way that runs the training and evaluation pipeline whenever a new merge request is opened with the necessary changes.\n\nThe **ml-ui** repository contains the necessary code to build the frontend of our ML application.\n\nSince the code is integrated with Comet, your ML team can easily track the source code, hyperparameters, metrics, and other details related to the development of the model.\n\nOnce the training and evaluation steps are completed, we can use Comet to fetch summary metrics from the project as well as metrics from the Candidate model and display them within the merge request; this will allow the ML team to easily review the changes to the model. 
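\n\nHow the metrics reach the merge request is part of the project's CI configuration, but the idea is easy to sketch: a pipeline job can post the fetched metrics back to the merge request through the GitLab Notes API. Here is a minimal, illustrative sketch; it assumes the metric values were exported to environment variables by an earlier step that queried Comet, and that `GITLAB_API_TOKEN` is a hypothetical CI/CD variable you define yourself with an `api`-scoped token:\n\n```bash\n# Illustrative sketch: publish candidate-model metrics as a merge request comment.\n# Assumes a merge request pipeline (CI_MERGE_REQUEST_IID is set) and that\n# CANDIDATE_ACCURACY and PROJECT_AVG_ACCURACY were exported by a previous step.\ncurl --request POST \\\n  --header \"PRIVATE-TOKEN: ${GITLAB_API_TOKEN}\" \\\n  --data-urlencode \"body=Candidate model accuracy: ${CANDIDATE_ACCURACY} (project average: ${PROJECT_AVG_ACCURACY})\" \\\n  \"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/merge_requests/${CI_MERGE_REQUEST_IID}/notes\"\n```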
\n\n![Model metrics graph in the merge request](https://about.gitlab.com/images/blogimages/buildmodelgraph.png){: .shadow}\n\n![Summary metrics in the merge request](https://about.gitlab.com/images/blogimages/summarymetrics.png){: .shadow}\n\nIn our case, the average accuracy of the models in the project is 97%. Our Candidate model achieved an accuracy of 99%, so it looks like it is a good fit to promote to production. The metrics displayed here are completely configurable and can be changed as necessary.\n\nWhen the merge request is approved, the deployment pipeline is triggered and the model is pushed to Comet’s Model Registry. The Model Registry versions each model and links it back to the Comet Experiment that produced it.\n\n![Opening the Comet Model Registry](https://about.gitlab.com/images/blogimages/OpenComet_SparkVideo.gif){: .shadow}\n\nOnce the model is pushed to the Model Registry, it is available to the application code. When the application team wishes to deploy this new version of the model to their app, they simply have to trigger their specific deployment pipeline.\n\n## Running the pipeline\n\n### Pipeline outline\n\nWe will run the process outlined below every time a team member creates a merge request to change code in the `build-neural-network` script:\n\n![Outline of the model review and approval process](https://about.gitlab.com/images/blogimages/modelapprove.png){: .shadow}\n\nNow, let’s take a look at the YAML config used to define our CI/CD pipelines depicted in the previous diagram:\n\n![Workflow rules in the CI/CD config](https://about.gitlab.com/images/blogimages/workflowsbranch.png){: .shadow}\n\n![Job scripts in the CI/CD config](https://about.gitlab.com/images/blogimages/script.png){: .shadow}\n\n![The register-model job in the CI/CD config](https://about.gitlab.com/images/blogimages/registermodel.png){: .shadow}\n\nLet's break down the CI/CD pipeline by describing the `.gitlab-ci.yml` file so you can use it and customize it to your needs.\n\nWe start by instructing our GitLab runners to use the `python:3.8` image to run the jobs specified in the pipeline:\n\n`image: python:3.8`\n\nThen, we define the job where we want to build and train the neural network:\n\n`build-neural-network`\n\n### build-neural-network\n\nIn this step, we start by creating a folder where we will store the artifacts generated by this job, install dependencies using the `requirements.txt` file, and finally execute the corresponding Python script that will be in charge of training the neural network. The training runs in the GitLab runner using the Python image defined above, along with its dependencies.\n\nOnce the `build-neural-network` job has finished successfully, we move to the next job: `write-report-mr`\n\nHere, we use another image created by DVC that will allow us to publish a report right in the merge request opened by the contributor who changed code in the neural network script. In this way, we’ve brought software development workflows to the development of ML applications. With the report provided by this job, code and model review can be executed within the merge request view, enabling teams to collaborate not only around the code but also the model performance.\n\nFrom the merge request page, we get access to loss curves and other relevant performance metrics from the model we are training, along with a link to the Comet Experiment UI, where richer details are provided to evaluate the model performance. These details include interactive charts for model metrics, the model hyperparameters, and Confusion Matrices of the test set performance, to name a few. 
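\n\nThe exact `.gitlab-ci.yml` is shown in the screenshots above; as a rough, simplified sketch of the shape of the first two jobs (the script name, report file, and reporting image/command are illustrative assumptions, not the project's exact code):\n\n```yaml\nimage: python:3.8\n\nbuild-neural-network:\n  script:\n    - mkdir -p artifacts                # folder for the artifacts generated by this job\n    - pip install -r requirements.txt   # install dependencies\n    - python build-neural-network.py    # train the neural network (illustrative name)\n  artifacts:\n    paths:\n      - artifacts/\n\nwrite-report-mr:\n  image: dvcorg/cml:latest              # illustrative DVC (CML) image for MR reports\n  needs:\n    - build-neural-network\n  script:\n    - cml-send-comment report.md        # publish the report to the merge request\n```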
\n\n![Manually deploying the model](https://about.gitlab.com/images/blogimages/manualDeploy_SparkVideo.gif){: .shadow}\n\nWhen the team is done with the code and model review, the merge request gets approved, and the script that generated the model is merged into the main codebase, along with its respective commit and the CI pipeline associated with it. This takes us to the next job:\n\n### register-model\n\nThis job uses an integration between GitLab and Comet to upload the reviewed and accepted version of the model to the Comet Model Registry. If you recall, the Model Registry is where models intended for production can be logged and versioned. In order to run the commands that will register the model, we need to set up these variables:\n\n- COMET_WORKSPACE\n- COMET_PROJECT_NAME\n\nTo do that, follow the steps described [here](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-an-instance).\n\nIt is worth noting that the `register-model` job only runs when the merge request gets reviewed and approved, and this behavior is obtained by setting `only: main` at the end of the job.\n\nFinally, we want a team member to have final control over the deployment, so we define a manual job: `deploy-ml-ui`\n\n![The manual deploy-ml-ui job](https://about.gitlab.com/images/blogimages/deployuiml.png){: .shadow}\n\nWhen triggered, this job will import the model from Comet’s Model Registry and automatically create the necessary containers to build the user interface and deploy to a Kubernetes cluster.\n\n![The triggered downstream pipeline](https://about.gitlab.com/images/blogimages/downstream.png){: .shadow}\n\nThis job triggers a downstream pipeline, which means that the UI for this MNIST application resides in a different project. This keeps the codebase for the UI and model training separated but integrated and connected at the moment of deploying the model to a production environment.\n\n![Multi-project pipeline view](https://about.gitlab.com/images/blogimages/multipipeline_SparkVideo.gif){: .shadow}\n\n## Key takeaways\n\nIn this post, we addressed some of the challenges faced by ML and software teams when it comes to collaborating on delivering ML-powered applications. Some of these challenges include:\n\n* The discrepancy in the frequency with which each of these teams needs to iterate on their codebases and CI/CD pipelines.\n\n* The fact that only a single set of experiment assets from an ML experimentation pipeline is relevant to the application.\n\n* The challenge of syncing a model or other experiment assets across independent codebases.\n\nUsing The GitLab DevOps Platform and Comet, we can start bridging the gap between ML and software engineering teams over the course of a project.\n\nBy having model performance metrics adopted into software development workflows like the one we saw in the issue and merge request, we can keep track of the code changes, discussions, experiments, and models created in the process. All the operations executed by the team are recorded, can be audited, are end-to-end traceable, and (most importantly) reproducible.\n\nWatch a demo of this process:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/W_DsNl5aAVk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_About Comet:_\nComet is an MLOps platform designed to help data scientists and teams build better models faster! 
Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place!\n\nLearn more about Comet [here](https://www.comet.ml/site/) and get started for free!\n\n\n\n",[894,2932,232,981],{"slug":3334,"featured":6,"template":678},"machine-learning-on-the-gitlab-devops-platform","content:en-us:blog:machine-learning-on-the-gitlab-devops-platform.yml","Machine Learning On The Gitlab Devops Platform","en-us/blog/machine-learning-on-the-gitlab-devops-platform.yml","en-us/blog/machine-learning-on-the-gitlab-devops-platform",{"_path":3340,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3341,"content":3346,"config":3351,"_id":3353,"_type":16,"title":3354,"_source":17,"_file":3355,"_stem":3356,"_extension":20},"/en-us/blog/gitops-with-gitlab-infrastructure-provisioning",{"title":3342,"description":3343,"ogTitle":3342,"ogDescription":3343,"noIndex":6,"ogImage":2478,"ogUrl":3344,"ogSiteName":692,"ogType":693,"canonicalUrls":3344,"schema":3345},"GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform","In part two of our GitOps series, we set up the infrastructure using GitLab and Terraform. Here's everything you need to know.","https://about.gitlab.com/blog/gitops-with-gitlab-infrastructure-provisioning","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-11-04\",\n      }",{"title":3342,"description":3343,"authors":3347,"heroImage":2478,"date":3348,"body":3349,"category":14,"tags":3350},[2014],"2021-11-04","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nThis post focuses on setting up the underlying infrastructure using GitLab and Terraform.\n\nThe first step is to have a network and some computing instances that we can use as our Kubernetes cluster. In this project, I’ll use [Civo](https://www.civo.com) to host the infrastructure as it has the most minimal setup, but the same can be achieved using any of the hyperclouds. GitLab documentation provides examples on how to set up a [cluster on AWS](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_eks_cluster.html) or [GCP](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_gke_cluster.html).\n\nWe want to have a project that describes our [infrastructure as code (IaC)](/topics/gitops/infrastructure-as-code/). As Terraform is today the de facto standard in infrastructure provisioning, we’ll use Terraform for the task. Terraform requires a state storage backend; we will use the GitLab managed Terraform state, which is very easy to get started with. Moreover, we will set up a pipeline to apply the infrastructure changes automatically when they are merged to the main branch.\n\n## What infrastructure-related steps are we going to codify?\n\n1. Create a VPC\n2. Set up a Kubernetes cluster\n\nActually, we will create separate Terraform projects for these steps under a single GitLab project. 
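\n\nConcretely, the repository will end up with one directory per Terraform project, along these lines:\n\n```\nterraform/\n├── network/\n└── cluster/\n```\n\n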
We split the infrastructure because in a real-world scenario, these projects will likely be a bit bigger, and Terraform slows down quite a lot if it has to deal with big projects. In general, it is a good practice to have small Terraform projects, and to think about the infrastructure in a layered way, where higher layers can reference the output of lower layers. There are [many ways to access the output of another Terraform project](https://www.terraform.io/docs/language/state/remote-state-data.html#alternative-ways-to-share-data-between-configurations), and we leave it up to the reader to learn more about these. In this case, we will use simple data resources.\n\nAfter this long intro, let’s get started!\n\n## Creating the network\n\nFirst, let’s create a new GitLab project. You can use either an empty project or any of the project templates. If you plan to do all these tutorials, I recommend starting with the [Cluster Management Project template](https://docs.gitlab.com/ee/user/clusters/management_project_template.html). Once the project is ready, let’s create the following files:\n\n- A `terraform/network/main.tf` file:\n\n```hcl\nterraform {\n  required_providers {\n    civo = {\n      source = \"civo/civo\"\n      version = \"0.10.10\"\n    }\n  }\n  backend \"http\" {\n  }\n}\n\n# Configure the Civo Provider\nprovider \"civo\" {\n  token = var.civo_token\n  region = local.region\n}\n\nresource \"civo_network\" \"network\" {\n    label = \"development\"\n}\n```\n\nThis file describes almost everything we want this project to do. The first block configures Terraform to use the `civo/civo` provider and a simple `http` backend for state storage. As I mentioned above, we will use [the GitLab managed Terraform state](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html), which acts like an `http` backend from Terraform’s point of view. The GitLab backend is versioned and encrypted by default, and GitLab CI/CD contains all the environment variables needed to access it. I will demonstrate later how you can access the backend either from the local command line or from GitLab CI/CD.\n\nNext, we configure the `Civo` provider. You can see that here we use two variables, an input and a local variable. These will be defined in separate files below. Finally, we describe a network and give it the “development” label.\n\n- A `terraform/network/outputs.tf` file:\n\n```hcl\noutput \"network\" {\n  value = civo_network.network.id\n}\n```\n\nThis file just provides the network ID as an output variable from Terraform. Other projects could consume it. We won’t use this, but I consider it a good practice as it might help to debug issues.\n\n- A `terraform/network/locals.tf` file:\n\n```hcl\nlocals {\n  region = \"LON1\"\n}\n```\n\nHere we define the `region` local variable, as mentioned in the description of the `main.tf` file. Why aren’t we making it an input variable? Because this is closely related to our infrastructure, and for this reason we want to keep it in code. It should be version-controlled, and changes should be reviewed following the team’s processes. We could also write the values into a `.tfvars` file to achieve versioning while keeping it as a variable. 
I prefer to keep it in `hcl` to have it closer to the rest of the code.\n\n- A `terraform/network/variables.tf` file:\n\n```hcl\nvariable \"civo_token\" {\n  type = string\n  sensitive = true\n}\n```\n\nFinally, we define the Civo access token as an input variable.\n\nNow, we are ready with the Terraform code, but we cannot access the GitLab state backend yet. For that, we need to configure either our local environment or GitLab CI/CD. Let’s see both setups.\n\n## Running Terraform locally\n\nYou can run Terraform either locally or using GitLab CI/CD. The following two sections present both approaches.\n\n### Accessing the GitLab Terraform state backend locally\n\nThe simplest way to configure the `http` backend is using environment variables. There are many environment variables needed though! For this reason, I prefer to use a collection of [direnv](https://direnv.net/) files. We will need all these environment variables configured:\n\n```\nTF_HTTP_PASSWORD\nTF_HTTP_USERNAME\nTF_HTTP_ADDRESS\nTF_HTTP_LOCK_ADDRESS\nTF_HTTP_LOCK_METHOD\nTF_HTTP_UNLOCK_ADDRESS\nTF_HTTP_UNLOCK_METHOD\nTF_HTTP_RETRY_WAIT_MIN\n```\n\nDirenv enables us to add a few files to our repository to describe the above environment variables in a nice and scalable way. Clearly, there are some variables that are sensitive, like `TF_HTTP_PASSWORD`, so these should not be stored in git. Moreover, we could reuse most of these variables in the other two Terraform projects we are going to create. With these considerations in mind, let’s create the following 3 files:\n\n- Create `terraform/network/.envrc`:\n\n```\nexport TF_STATE_NAME=civo-${PWD##*terraform/}\nsource_env ../../.main.env\n```\n\nThis sets the `TF_STATE_NAME` variable to `civo-network` using some bash magic and loads the `.main.env` file from the root of the repository using the `source_env` method provided by `direnv`. This can be added to version control safely.\n\n- Create `.main.env`:\n\n```\nsource_env_if_exists ./.local.env\n\nCI_PROJECT_ID=28431043\nexport TF_HTTP_PASSWORD=\"${CI_JOB_TOKEN:-$GITLAB_ACCESS_TOKEN}\"\nexport TF_HTTP_USERNAME=\"${GITLAB_USER_LOGIN}\"\nexport GITLAB_URL=https://gitlab.com\n\nexport TF_VAR_remote_address_base=\"${GITLAB_URL}/api/v4/projects/${CI_PROJECT_ID}/terraform/state\"\nexport TF_HTTP_ADDRESS=\"${TF_VAR_remote_address_base}/${TF_STATE_NAME}\"\nexport TF_HTTP_LOCK_ADDRESS=\"${TF_HTTP_ADDRESS}/lock\"\nexport TF_HTTP_LOCK_METHOD=\"POST\"\nexport TF_HTTP_UNLOCK_ADDRESS=\"${TF_HTTP_LOCK_ADDRESS}\"\nexport TF_HTTP_UNLOCK_METHOD=\"DELETE\"\nexport TF_HTTP_RETRY_WAIT_MIN=5\n\n# export TF_LOG=\"TRACE\"\n```\n\nThis file contains the bulk of the environment variables we need, and can be added to version control safely as no secrets are stored there. The first line loads the `.local.env` file that will contain the sensitive values, again using a `direnv` method. The second line contains the GitLab project ID. This is shown under the project name of your GitLab project. The next three lines configure access to GitLab. The username and password will be populated from the `.local.env` file, while the `GITLAB_URL` variable is there to help you if you are on a self-managed GitLab instance.\n\n- Create `.local.env` and add it to `.gitignore`:\n\n```\nGITLAB_ACCESS_TOKEN=\u003Cyour GitLab personal access token>\nGITLAB_USER_LOGIN=\u003Cyour GitLab username>\nexport TF_VAR_civo_token=\u003Cyour Civo access token>\n```\n\nClearly, I cannot provide the values for this file. Please fill them out with your credentials. 
You can generate a GitLab personal access token under your settings. To access the GitLab managed Terraform state using a personal access token, the token should have the `api` scope enabled.\n\nWarning: **Don’t forget to add this file to `.gitignore`**. Actually, I have it in my global gitignore file to avoid accidental commits.\n\nNow that the environment variables are set up, you should make direnv start using them. When you `cd` into the `terraform/network` directory, a warning should appear asking you to run `direnv allow`. Enable the environment variables:\n\n```\ncd terraform/network\ndirenv allow\n```\n\n### Creating the network - finally\n\nLet’s see if we managed to set up everything right!\n\n```\nterraform init\nterraform plan\n```\n\nThe first command just initializes Terraform, downloads the Civo plugin and does some sanity checks. The second command, on the other hand, connects to the remote state backend and computes the changes necessary to provision the infrastructure we described in this project.\n\nIf we like the changes, we can apply them with:\n\n```\nterraform apply\n```\n\n_Nota bene_: in a real-world setup, you would likely output a plan file from `terraform plan` and feed it into `terraform apply`, just as the CI/CD setup will do later. Anyway, this is good enough for us, so let’s create the cluster next.\n\n### Running Terraform using GitLab CI/CD\n\nNote: This section assumes that you have access to GitLab Runners to run the CI/CD jobs.\n\nGiven the flexibility of GitLab CI/CD, it can be set up in many different ways. Here we will build a pipeline that incorporates the most important aspects of a Terraform-oriented pipeline, without restricting you to require merge requests or any other processes. The only restriction we'll place on it is that changes should only be applied on the main branch, and only as a manual action.\n\nCopy the following code into `.gitlab-ci.yml` in the root of your project:\n\n```yaml\ninclude:\n  - template: \"Terraform/Base.latest.gitlab-ci.yml\"\n\nstages:\n- init\n- build\n- deploy\n\nnetwork:init:\n  extends: .terraform:init\n  stage: init\n  variables:\n    TF_ROOT: terraform/network\n    TF_STATE_NAME: network\n  only:\n    changes:\n      - \"terraform/network/*\"\n\nnetwork:review:\n  extends: .terraform:build\n  stage: build\n  variables:\n    TF_ROOT: terraform/network\n    TF_STATE_NAME: network\n  resource_group: tf:network\n  only:\n    changes:\n      - \"terraform/network/*\"\n\nnetwork:deploy:\n  extends: .terraform:deploy\n  stage: deploy\n  variables:\n    TF_ROOT: terraform/network\n    TF_STATE_NAME: network\n  resource_group: tf:network\n  environment:\n    name: dns\n  when: manual\n  only:\n    changes:\n      - \"terraform/network/*\"\n    variables:\n      - $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\nThis CI pipeline reuses [the latest base Terraform CI template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Terraform) shipped with GitLab, and runs the jobs by simply parameterizing them, like function calls. 
Let's quickly review the keys used:\n\n- the [`stages`](https://docs.gitlab.com/ee/ci/yaml/#stages) keyword provides a list of stages to compose the pipeline\n- the [`extends`](https://docs.gitlab.com/ee/ci/yaml/#extends) keyword refers to the job defined in [the base Terraform template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Terraform/Base.latest.gitlab-ci.yml)\n- the [`variables`](https://docs.gitlab.com/ee/ci/yaml/#variables) keyword parameterizes the job for our requirements\n- the [`resource_group`](https://docs.gitlab.com/ee/ci/yaml/#resource_group) keyword ensures that only one potentially conflicting job runs at a time\n- the [`only`](https://docs.gitlab.com/ee/ci/yaml/#only--except) keyword restricts runs to specific situations\n\nIf you commit this file and push it to GitLab, a new pipeline will be created that, as its last step, provides you with a manual job to create your network. We will extend this file later throughout this tutorial series.\n\n## Create a Kubernetes cluster\n\nThe code required for the cluster will be very similar to the code for the network.\n\n- Add a `terraform/cluster/main.tf` file:\n\n```hcl\nterraform {\n  required_providers {\n    civo = {\n      source = \"civo/civo\"\n      version = \"0.10.4\"\n    }\n  }\n  backend \"http\" {\n  }\n}\n\n# Configure the Civo Provider\nprovider \"civo\" {\n  token = var.civo_token\n  region = local.region\n}\n\nresource \"civo_kubernetes_cluster\" \"dev-cluster\" {\n    name = \"dev-cluster\"\n    // tags = \"gitlab demo\"  // Do not add tags! There is a bug in the civo-provider :(\n    network_id = data.civo_network.network.id\n    applications = \"\"\n    num_target_nodes = 3\n    target_nodes_size = element(data.civo_instances_size.small.sizes, 0).name\n}\n```\n\nThe only difference compared to `terraform/network/main.tf` is the last resource, as that describes the cluster. You can see how we reference the network created before. Of course, we'll need a `data` resource for this and the instance sizes.\n\n- Add a `terraform/cluster/data.tf` file:\n\n```hcl\ndata \"civo_instances_size\" \"small\" {\n    filter {\n        key = \"name\"\n        values = [\"g3.small\"]\n        match_by = \"re\"\n    }\n\n    filter {\n        key = \"type\"\n        values = [\"instance\"]\n    }\n\n}\n\ndata \"civo_network\" \"network\" {\n    label = \"development\"\n}\n```\n\n- The `terraform/cluster/outputs.tf` file outputs some useful details. 
We won't use them now, but they often come in handy in the longer term.\n\n```hcl\noutput \"cluster\" {\n  value = {\n    status = civo_kubernetes_cluster.dev-cluster.status\n    master_ip = civo_kubernetes_cluster.dev-cluster.master_ip\n    dns_entry = civo_kubernetes_cluster.dev-cluster.dns_entry\n  }\n}\n```\n\n- The `terraform/cluster/locals.tf` file is the same as for the network project:\n\n```hcl\nlocals {\n  region = \"LON1\"\n}\n```\n\n- The `terraform/cluster/variables.tf` file is the same as for the network project:\n\n```hcl\nvariable \"civo_token\" {\n  type = string\n  sensitive = true\n}\n```\n\n### Provision the cluster\n\nLet's see how we can extend the previous local and CI/CD setups to run this Terraform project!\n\n#### Running locally\n\n- Create `terraform/cluster/.envrc` as you did for the network project:\n\n```\nexport TF_STATE_NAME=civo-${PWD##*terraform/}\nsource_env ../../.main.env\n```\n\nNow run Terraform:\n\n```bash\nterraform init\nterraform plan\nterraform apply\n```\n\n#### Running from CI/CD\n\nExtend the `.gitlab-ci.yml` file with the following 3 jobs:\n\n```yaml\ncluster:init:\n  extends: .terraform:init\n  stage: init\n  variables:\n    TF_ROOT: terraform/cluster\n    TF_STATE_NAME: cluster\n  only:\n    changes:\n      - \"terraform/cluster/*\"\n\ncluster:review:\n  extends: .terraform:build\n  stage: build\n  variables:\n    TF_ROOT: terraform/cluster\n    TF_STATE_NAME: cluster\n  resource_group: tf:cluster\n  only:\n    changes:\n      - \"terraform/cluster/*\"\n\ncluster:deploy:\n  extends: .terraform:deploy\n  stage: deploy\n  variables:\n    TF_ROOT: terraform/cluster\n    TF_STATE_NAME: cluster\n  resource_group: tf:cluster\n  environment:\n    name: dev-cluster\n  when: manual\n  only:\n    changes:\n      - \"terraform/cluster/*\"\n    variables:\n      - $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\nAs you can see, these are the same jobs we have already seen, just parameterized for the `cluster` Terraform project.\n\nOnce you push your code to GitLab, your cluster should be ready in a few minutes!\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n\n",[535,1002,915],{"slug":3352,"featured":6,"template":678},"gitops-with-gitlab-infrastructure-provisioning","content:en-us:blog:gitops-with-gitlab-infrastructure-provisioning.yml","Gitops With Gitlab Infrastructure Provisioning","en-us/blog/gitops-with-gitlab-infrastructure-provisioning.yml","en-us/blog/gitops-with-gitlab-infrastructure-provisioning",{"_path":3358,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3359,"content":3364,"config":3370,"_id":3372,"_type":16,"title":3373,"_source":17,"_file":3374,"_stem":3375,"_extension":20},"/en-us/blog/gitops-with-gitlab",{"title":3360,"description":3361,"ogTitle":3360,"ogDescription":3361,"noIndex":6,"ogImage":2478,"ogUrl":3362,"ogSiteName":692,"ogType":693,"canonicalUrls":3362,"schema":3363},"GitOps delivery by connecting Kubernetes clusters to GitLab","This is the first in a seven-part series on GitOps using GitLab's DevOps Platform.","https://about.gitlab.com/blog/gitops-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Here's how to do GitOps with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-10-21\",\n      
}",{"title":3365,"description":3361,"authors":3366,"heroImage":2478,"date":3367,"body":3368,"category":14,"tags":3369},"Here's how to do GitOps with GitLab",[2014],"2021-10-21","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nThis post provides an overview of the series, and will provide a bit of context around GitOps, [Infrastructure as Code](/topics/gitops/infrastructure-as-code/), and related notions.\n\n## Start with the buzzwords\n\nThe DevOps industry is changing at a very fast pace, and there are plenty of new ideas popping up around this transformation. What are these? Let’s look into the following concepts and why they matter: DevOps, site reliability engineers (SRE), GitOps, Infrastructure as Code, and containers.\n\nThe term DevOps was coined by Patrick Debois in 2009. DevOps is a cultural approach, not a technology or a set of processes. At its core there are a few principles such as continuous learning, fast feedback loops and a clear flow of work. There is a strong connection between DevOps and SRE, as one can think of the SRE approach as a well-defined implementation of DevOps. Two important aspects of the SRE approach are codified infrastructure management and metrics. These enable the level of automation needed for feedback, and their central metrics (SLIs) are being moved to the left down to development teams too.\n\nWith the emergence of cloud computing, infrastructure can be managed fully through APIs. This gave rise to Infrastructure as Code or IaC. IaC means infrastructure engineers almost never have to click through a provider’s UI to configure a new user or a resource. IaC approaches can be used to configure GitLab itself or to allow GitLab to configure a 3rd party system (such as creating a cluster or managing databases).\n\n[GitOps](/topics/gitops/) is the new kid on the block here, and it basically summarizes the current state of our industry. IaC projects likely store their code in version-controlled ways, probably in git. They might even be automated through pipelines, and the resulting infrastructure might have good observability built into the whole stack. So, what does GitOps bring to the table? It brings us two things. First, GitOps wants to avoid drift using a reconciliation loop that automatically “fixes” the infrastructure if it deviates from the codified state found in the IaC repository. Whether this is feasible and how this is done is still a debated question. At the same time, the rise of declarative infrastructure popularized by Kubernetes makes this a compelling approach to many. The second benefit of GitOps is the \"declarative\" ability. By being declarative, the desired state of the infrastructure is described in the git repo. This simplifies complexity in provisioning as the end-system is tasked by setting up the described infrastructure. Contrast this with an imperative setup where the administrators have to codify the exact steps of setting up the infrastructure.\n\nContainers are mentioned here for a single reason: Once we get to deployments, I am going to focus on containerized applications only. 
Containers have already proved to be a great layer of abstraction for application delivery.\n\nYou can [read more about the evolution of DevOps](/blog/gitops-as-the-evolution-of-operations/) and how we got to GitOps as part of this evolution.\n\n## The series overview\n\n**Infrastructure provisioning with GitLab and Terraform**: My next post in the series will outline how to use GitLab to provision infrastructure. In this post I will use a GitLab project to create an EKS cluster following IaC best practices. To do this I will use Terraform, as Terraform is considered to be the de facto standard in infrastructure provisioning, and GitLab has strong built-in support for it.\n\n**Connecting GitLab with a Kubernetes cluster - Quickstart**: This post will show how one can quickly connect a cluster with GitLab using our recommended way, the GitLab Agent for Kubernetes. As this is a quickstart, this approach does not use all the GitLab IaC recommendations. Nevertheless, it is a great start that we can build upon later. This post will outline the different approaches for connecting a cluster to GitLab, including our recommended approach.\n\n**Secrets management with GitLab**: In the third post, I will deploy a simple “secrets as code” solution into our cluster and set it up for future use. This will demonstrate how third-party services can easily be deployed and managed with GitLab. Moreover, this specific tool will be used in the subsequent post where we migrate from the quickstart cluster connection to a self-managing, IaC connection.\n\n**Managing the cluster connection from code**: In the second post, we created a GitLab-connected cluster, but there we either need to manage the cluster from our local CLI or need to do some CI magic. Now I will demonstrate how to build out more robust management of the cluster connection. We set up the cluster connection to manage itself using a pull-based approach.\n\n**Integrate the cluster into GitLab**: As GitLab is not just an SCM and CI tool, but the complete DevOps Platform, it has robust monitoring and security integrations with Kubernetes. In this post I am going to show how one can use the GitLab-provided cluster management application on top of our cluster connection, and install NGINX, Cilium, and custom runners with minimal effort, in an IaC style.\n\n**Application deployment with Auto DevOps**: The final post in the series will illustrate how business applications can be easily deployed into the cluster. I will focus on push-based deployments as many development teams might be familiar with pipelines, unlike the most recent pull-based approaches. 
At the same time, given the content from the previous posts, it should be possible to put together a pull-based deployment on top of Auto DevOps as well.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n",[535,894,1002],{"slug":3371,"featured":6,"template":678},"gitops-with-gitlab","content:en-us:blog:gitops-with-gitlab.yml","Gitops With Gitlab","en-us/blog/gitops-with-gitlab.yml","en-us/blog/gitops-with-gitlab",{"_path":3377,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3378,"content":3384,"config":3390,"_id":3392,"_type":16,"title":3393,"_source":17,"_file":3394,"_stem":3395,"_extension":20},"/en-us/blog/gitlab-cnh-for-50k-users",{"title":3379,"description":3380,"ogTitle":3379,"ogDescription":3380,"noIndex":6,"ogImage":3381,"ogUrl":3382,"ogSiteName":692,"ogType":693,"canonicalUrls":3382,"schema":3383},"Ready-To-Run GitLab for 50,000 users with AWS Quick Start","If you have two hours, you can deploy a GitLab instance on EKS for any number of users. All it takes is about 14 clicks! Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680619/Blog/Hero%20Images/construction-blueprint.jpg","https://about.gitlab.com/blog/gitlab-cnh-for-50k-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision Ready-To-Run GitLab for 50,000 users with the AWS Quick Start\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2021-10-06\",\n      }",{"title":3385,"description":3380,"authors":3386,"heroImage":3381,"date":3387,"body":3388,"category":14,"tags":3389},"How to provision Ready-To-Run GitLab for 50,000 users with the AWS Quick Start",[1701],"2021-10-06","\n\nIf you have spent time reviewing GitLab Reference Architectures, you may have noticed the flexibility of the GitLab codebase; it's possible to support a broad range of implementations from a single box for under one hundred users to horizontal hyper-scaled setups for 50,000 or more.\n\nScaling to massive sizes requires the services within GitLab to be broken out into dedicated compute and storage layers so they can each expand cost effectively based on high loading and an organization's specific usage patterns.\n\nThose who provision large scale systems on the cloud generally turn to [Infrastructure as Code (IaC)](/direction/delivery/infrastructure_as_code/) to ensure consistency and to allow easy setup of pre-production environments for the target system. Until recently, GitLab implementers have had to craft this code from scratch.\n\nNow, thanks to our investments in IaC tooling, GitLab customers have an entire implementation ecosystem to work from. These efforts include the [GitLab Environment Toolkit (GET)](/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale/) and the AWS Quick Start for cloud native hybrid on EKS.\n\nThis post will focus on the AWS Quick Start - but it's worth noting both initiatives are open source - so you can consume, customize and contribute!\n\n## What is an AWS Quick Start?\n\nAWS Quick Starts are much more than the \"getting started\" feeling implied by their name. As a part of the Quick Start program, AWS ensures that each one reflects the best practices of the software vendor (GitLab in this case) as well as AWS' own well-architected standards. 
They reflect a high level of technical partnership and technical assurance by both companies. The Quick Start program also includes a hard requirement for high availability of every component of the deployed application. Even bastion hosts are run in an autoscaling group so they will respawn if they unexpectedly terminate. Quick Starts are also intended to create a \"Ready-to-Run\" implementation whenever possible. Quick Starts are open source and have a dependency model which allows GitLab to reuse the existing EKS Quick Start as a foundation.\n\n## What is the GitLab AWS implementation pattern for cloud native hybrid on EKS?\n\nGitLab has Reference Architectures that determine how to install GitLab for various user counts. Each Reference Architecture has a section on cloud native hybrid to show how to configure it and the advised number of vCPUs and memory for the target user count. Each one is similar to blueprints for a building.\n\nThe AWS implementation pattern for cloud native hybrid on EKS builds on this information by:\n\n- Showing how to maximize the usage of AWS PaaS with assurance of GitLab Reference Architecture compliance.\n- Showing a tally of total cluster resources as specified by the Reference Architecture.\n- Presenting a bill of materials listing:\n\n  - EKS node instance type (sizing) and count as tested.\n  - RDS PostgreSQL and Redis Elasticache instance types (sizing) and count as tested.\n  - Gitaly Cluster instance types (sizing) and count as tested.\n\n- [GPT testing](https://gitlab.com/gitlab-org/quality/performance) results for a system configured according to the bill of materials. This can be used to compare back to the reference architectures and to your own configuration that is based on the bill of materials.\n\nSo while the Reference Architectures are like building blueprints, the AWS implementation pattern for cloud native hybrid on EKS intends to be like a bill of materials (shopping list) you can plug directly into the parameters of the AWS Quick Start or the GitLab Environment Toolkit to build GitLab on EKS with a pre-tested configuration.\n\n## \"Deploy Now\" links\n\nWithin each AWS implementation pattern for cloud native hybrid on EKS you will find some \"Deploy Now\" links. These make the AWS Quick Start even easier to use by presetting all the instance types and instance counts based on the bill of materials for the user size. This reduces the number of fields you need to fill out on the Quick Start form. The Deploy Now links are how we were able to reduce the number of clicks to deploy for 50,000 users to just 14.\n\nThe Quick Start takes about two hours to deploy regardless of the size of instance you choose.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/s3ZaBXYG8nc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How you can deploy GitLab for any number of users in a couple of hours\n\nThe YouTube playlist [Learning to provision the AWS Quick Start for GitLab on EKS](https://youtube.com/playlist?list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5) walks you through:\n\n1. [GitLab Reference Architectures, performance testing, cloud native hybrid and what is Gitaly](https://www.youtube.com/watch?v=1TYLv2xLkZY&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=1&t=399s) (11mins)\n2. 
[An overview of GitLab AWS implementation patterns](https://www.youtube.com/watch?v=_x3I1aq7fog&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=2) (13mins)\n3. [An overview of AWS Quick Start for cloud native hybrid on EKS](https://www.youtube.com/watch?v=XHg6m6fJjRY&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=3&t=8s) (9mins)\n4. [Provisioning Ready-To-Run GitLab for 50,000 users in 14 clicks and a long lunch](https://www.youtube.com/watch?v=s3ZaBXYG8nc&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=4&t=798s) (21mins) - same as the video embedded above.\n5. [Easy performance testing an AWS Quick Start-provisioned GitLab cloud native hybrid instance](https://www.youtube.com/watch?v=QpkF1vXXCjk&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=5&t=510s) (32mins)\n\nIf you would like help getting started with GitLab instance provisioning on AWS, please contact your GitLab account team or reach out to [GitLab Sales](https://about.gitlab.com/sales/)!\n",[873,232,771],{"slug":3391,"featured":6,"template":678},"gitlab-cnh-for-50k-users","content:en-us:blog:gitlab-cnh-for-50k-users.yml","Gitlab Cnh For 50k Users","en-us/blog/gitlab-cnh-for-50k-users.yml","en-us/blog/gitlab-cnh-for-50k-users",{"_path":3397,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3398,"content":3403,"config":3410,"_id":3412,"_type":16,"title":3413,"_source":17,"_file":3414,"_stem":3415,"_extension":20},"/en-us/blog/gitpod-desktop-app-personal-activities",{"title":3399,"description":3400,"ogTitle":3399,"ogDescription":3400,"noIndex":6,"ogImage":2478,"ogUrl":3401,"ogSiteName":692,"ogType":693,"canonicalUrls":3401,"schema":3402},"Why we built GitDock, our desktop app to navigate your GitLab activities","Life is full of moving parts. We get it. And that's why we created GitDock so you can keep track of all things GitLab right from your desktop.","https://about.gitlab.com/blog/gitpod-desktop-app-personal-activities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we built GitDock, our desktop app to navigate your GitLab activities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcel van Remmerden\"},{\"@type\":\"Person\",\"name\":\"Jeremy Elder\"}],\n        \"datePublished\": \"2021-10-05\",\n      }",{"title":3399,"description":3400,"authors":3404,"heroImage":2478,"date":3407,"body":3408,"category":14,"tags":3409},[3405,3406],"Marcel van Remmerden","Jeremy Elder","2021-10-05","\n\nKeeping track of everything that is happening in your GitLab projects and groups can be quite overwhelming. Oftentimes you care about not just one project, but multiple ones. Even worse, these projects might belong to different groups, making everything more complex.\n\nAs an example, product designers at GitLab might work on all of these different projects over the course of just one week:\n\n- [gitlab-org/gitlab](https://gitlab.com/gitlab-org/gitlab) (our product)\n- [gitlab-com/www-gitlab-com](https://gitlab.com/gitlab-com/www-gitlab-com) (our handbook)\n- [gitlab-org/gitlab-design](https://gitlab.com/gitlab-org/gitlab-design/) (space for discussions)\n- [gitlab-org/gitlab-services/design.gitlab.com](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com) (our design system)\n- [gitlab-org/ux-research](https://gitlab.com/gitlab-org/ux-research) (research studies)\n\n## User-centric vs. 
project-centric navigation\n\nOne of our product design managers ([@jackib](https://gitlab.com/jackib)) created a visualization that shows the current project-centric navigation model that we have in place.\n\n![Project-centric navigation](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/project-centric-navigation.png)\n\nThis model puts the burden of keeping track of your activities and the work you care about on the user. We would rather look for opportunities where we can enable a more user-centric navigation.\n\n![User-centric navigation](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/user-centric-navigation.png)\n\n## Why do we care about this?\n\nUsers already have different ways to stay up to date, for example email notifications, our \"to-dos,\" or custom systems they have set up for themselves. However, when we ran a UX research study, we noticed these tools oftentimes only show a small subset of the things that users are curious about, or that the tools have to be checked multiple times during the day.\n\nA short summary of the main points we learned from this study:\n\n- Maintainers care about what happened to their project since they last looked at it.\n- Users repeatedly check their pipelines to see the results.\n- Oftentimes users need to jump back into issues/MRs they have recently contributed to.\n\n## What is GitDock?\n\nGitDock is a desktop app you can install on your macOS/Windows/Linux machine (download [latest release](https://gitlab.com/mvanremmerden/gitdock/-/releases)). When installed, you will have an icon on your menu bar that brings up a small window.\n\n![GitDock](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/gitdock-window.png)\n\nFrom there you will have direct access to the following information:\n\n- The last pipelines you triggered\n- Your recently viewed GitLab objects (MRs, Issues, Epics, etc...)\n- Favorite projects\n- Your most recent comments\n- Bookmarked items\n\nGitDock also sends you a system notification whenever a pipeline completes, or when a new to-do is created for you.\n\nAll of these features try to put the user at the center. You can see me walk through all functionality in this overview video:\n\n[![YouTube video](https://about.gitlab.com/images/blogimages/2021-10-05-gitdock/gitdock-youtube.png)](https://www.youtube.com/watch?v=WkVS38wo4_w)\n\nYou can also see the entire code in our [GitDock](https://gitlab.com/mvanremmerden/gitdock) project and download the [newest release for your machine](https://gitlab.com/mvanremmerden/gitdock/-/releases). \n\n## Why didn't we make this part of our Web UI?\n\nThe main goal for GitDock is to help us learn how users want to navigate in this more user-centric approach. We decided to build this [minimum viable change (MVC)](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) in a separate product as it allowed us to move faster and use a few shortcuts, e.g. relying on the local browser history for the recently viewed items instead of storing these in our database. It also permitted us to cut some corners on performance as our API is not yet optimized for this approach. Here's one example of how it's not optimized: getting the last pipeline you triggered requires three API calls to different endpoints.\n\nOne other advantage is that it gives us a space to test new ideas that we are curious about without having to fully commit to them (e.g. 
bookmarks).\n\n## What are the next steps?\n\nWe want to use the learnings and data from this project to help us [build a better start page for GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/225331). Right now this page is configurable and can show you different content, but almost 99% of users keep the default \"Your projects\" list as their start page. We don't think users do this because it is truly the most useful option, and we want to create a better experience for this.\n\nThat's why we are still looking for feedback. Let us know what you think about GitDock and what other content would be helpful for you in a start page or other navigation feature.\n",[915,1144,894],{"slug":3411,"featured":6,"template":678},"gitpod-desktop-app-personal-activities","content:en-us:blog:gitpod-desktop-app-personal-activities.yml","Gitpod Desktop App Personal Activities","en-us/blog/gitpod-desktop-app-personal-activities.yml","en-us/blog/gitpod-desktop-app-personal-activities",{"_path":3417,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3418,"content":3423,"config":3428,"_id":3430,"_type":16,"title":3431,"_source":17,"_file":3432,"_stem":3433,"_extension":20},"/en-us/blog/how-to-status-checks",{"title":3419,"description":3420,"ogTitle":3419,"ogDescription":3420,"noIndex":6,"ogImage":2478,"ogUrl":3421,"ogSiteName":692,"ogType":693,"canonicalUrls":3421,"schema":3422},"How to use external status checks for merge requests","Want to integrate third-party systems and apps with GitLab merge requests? Here's everything you need to know.","https://about.gitlab.com/blog/how-to-status-checks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use external status checks for merge requests\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-10-04\",\n      }",{"title":3419,"description":3420,"authors":3424,"heroImage":2478,"date":3425,"body":3426,"category":14,"tags":3427},[1101],"2021-10-04","\n\nThe [external status checks for merge requests capability](/releases/2021/07/22/gitlab-14-1-released/#external-status-checks-for-merge-requests) was recently introduced in GitLab, and it allows the integration of third-party systems and applications with GitLab merge requests.\n\n## What are \"external status checks for merge requests\"?\n\nExternal status checks are API calls to systems or applications that sit outside GitLab. These API calls are invoked during merge requests, which display a widget with the status of each external check. With external status checks, you can integrate GitLab with third-party systems, e.g. Salesforce, PeopleSoft, Microsoft Dynamics, etc., that require manual approval for merge requests. This makes it easy to see that merge requests have met external requirements before being merged, adding an extra method to ensure compliance and audit requirements are met.\n\n## Steps to enable and use external status checks for merge requests\n\nIn this example, I have a sample project called **my-proj**, for which I'd like to add and exercise a single external status check, which will hypothetically do some kind of validation for the merge request.\n\n### Adding an external status check to your project\n\nExternal status checks are added to merge requests by heading to your project’s **Settings > General** and then expanding the **Merge requests** section. 
Towards the bottom of the **Merge requests** section, you will see an **Add status check** button, which you will need to click to display the **Add status check** pop-up dialog:\n\n\u003C!--\n![Add status check dialog](https://about.gitlab.com/images/blogimages/how-to-status-checks/1-add-status-check-dialog.png){: .shadow.small.center.wrap-text}\nAdd status check dialog with filled values\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/1-add-status-check-dialog.png\" width=\"50%\" height=\"50%\">\nAdd status check dialog with filled values\n{: .note.text-center}\n\nIn the dialog above, the external service is being given the name *compliance-check*. The external API that will be called is:\n\n> https://tech-marketing-sandbox-cd-compvalidator.compliance.gitlabworkshops.io/validate\n\n> **NOTE:** the *validate* service above was [a simple Java service that I set up](https://gitlab.com/tech-marketing/sandbox/cd/compvalidator) ahead of time to mimic a third-party external service. It returned an HTTP 200 success message when invoked. In a real life scenario, this external API call would be a SaaS service or an on-premises ERP system, for example.\n\nThe API above is a call - invoked from any merge requests created under this project - to an external system that will run a compliance check and validate modifications to this application.\n\nAs the target branch, the default *Any branch* has been selected. Another option could have been the *main* branch.\n\nWhen you click the **Add status check** button, an entry will be created in the **Status checks** table, as shown below:\n\n![status check table](https://about.gitlab.com/images/blogimages/how-to-status-checks/2-status-checks-table.png){: .shadow.small.center.wrap-text}\nStatus checks table\n{: .note.text-center}\n\n### External status check in action\n\nTo exercise the external status check for merge requests, we need to create a merge request. But before that, let's create an issue.\n\n1. Create an issue by clicking on **Issues > List** from the left vertical navigation menu to get to the Issues screen.\n\n2. Then click on the **New Issue** button.\n\n3. On the **New Issue** window:\n\n3.1. In the Title field, enter \"External status check demo\"\n\n3.2. In the Description field, enter \"Issue to demonstrate an external status check\"\n\n3.3. Click on **Assign to me** next to the **Assignees** field\n\n3.4. Click on the **Create issue** button at the bottom of the window\n\n\u003C!--\n![issue create window](https://about.gitlab.com/images/blogimages/how-to-status-checks/3-issue-create-window.png){: .shadow.small.center.wrap-text}\nCreating an issue\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/3-issue-create-window.png\" width=\"75%\" height=\"75%\">\nCreating an issue\n{: .note.text-center}\n\nOnce the issue is created, you will be in the detail issue window.\n\n4. Click on the **Create merge request** button on the right hand side of the detailed issue window.\n\n![create a merge request](https://about.gitlab.com/images/blogimages/how-to-status-checks/4-create-merge-req.png){: .shadow.small.center.wrap-text}\nCreating a merge request\n{: .note.text-center}\n\nOnce the merge request is created, you will be in the detail merge request window.\n\n5. 
Click on the **Open in Web IDE** button on the right hand side of the detailed merge request window:\n\n![open webIDE](https://about.gitlab.com/images/blogimages/how-to-status-checks/5-open-webide.png){: .shadow.small.center.wrap-text}\nOpening the Web IDE\n{: .note.text-center}\n\n6. Make a minor update to the application. In the sample project **my-proj**, I modified two files: DemoApplication.java and DemoApplicationTests.java.\n\n6.1. In the DemoApplication.java class, I added the word \"today\" to the string returned by a call to this class:\n\n![update DemoApp](https://about.gitlab.com/images/blogimages/how-to-status-checks/6-update-demoapp.png){: .shadow.small.center.wrap-text}\nMaking a simple update to DemoApplication.java\n{: .note.text-center}\n\n6.2. In the DemoApplicationTests.java class, which is a unit test for DemoApplication.java, I also added the word \"today\" to the string in the *assertThat()* invocation to match the value returned by a call to the DemoApplication.java class:\n\n![update DemoAppTests](https://about.gitlab.com/images/blogimages/how-to-status-checks/7-update-demoapptests.png){: .shadow.small.center.wrap-text}\nMaking a simple update to DemoApplicationTests.java\n{: .note.text-center}\n\n7. Click on the **Commit…** button at the bottom of the Web IDE window. Then make sure to select the feature branch for the merge request before clicking on the **Commit** button again:\n\n\u003C!--\n![committing to feature branch](https://about.gitlab.com/images/blogimages/how-to-status-checks/8-click-commit.png){: .shadow.small.center.wrap-text}\nCommitting to the feature branch\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/8-click-commit.png\" width=\"30%\" height=\"30%\">\nCommitting to the feature branch\n{: .note.text-center}\n\n8. Go back to the merge request detail window by clicking on the merge request number on the bottom margin of the window:\n\n\u003C!--\n![click on merge request link](https://about.gitlab.com/images/blogimages/how-to-status-checks/9-click-mr-at-bottom.png){: .shadow.small.center.wrap-text}\nClicking on merge request link at bottom of window\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/9-click-mr-at-bottom.png\" width=\"75%\" height=\"75%\">\nClicking on merge request link at bottom of window\n{: .note.text-center}\n\n9. On the detail merge request window, scroll down until you see a section titled **Status checks 1 pending**. This is the merge request widget that lists all external status checks associated with merge requests. Click on the **Expand** button on the right hand side of this section:\n\n![expanding status checks widget](https://about.gitlab.com/images/blogimages/how-to-status-checks/10-click-on-expand.png){: .shadow.small.center.wrap-text}\nExpanding the status checks widget in the merge request\n{: .note.text-center}\n\n10. In the expanded section, you will see an entry for the external status check you defined above, whose name is *compliance-check*. Notice that to the left of its name, there is a pause symbol indicating to the merge request stakeholders that the check is still in progress and has not communicated its approval to the merge request yet:\n\n![list of status checks](https://about.gitlab.com/images/blogimages/how-to-status-checks/11-status-checks-widget-expanded.png){: .shadow.small.center.wrap-text}\nList of external status checks\n{: .note.text-center}\n\n11. 
In a real life scenario, the pause symbol would change to a green checkmark when the external status check communicates to GitLab that the compliance validation is finished, i.e. the merge request has been approved by the external service:\n\n![status checks passed](https://about.gitlab.com/images/blogimages/how-to-status-checks/12-status-check-passed.png){: .shadow.small.center.wrap-text}\nStatus checks that have passed\n{: .note.text-center}\n\n### How does an external status check inform GitLab that it has approved the merge request?\n\nUsing an external status check integrates GitLab merge requests with a home-grown or SaaS application, for example, by invoking an API of this external system. Once this external system does its compliance validation or check, it needs to inform GitLab that it has approved the merge request. To do this, the external system API must make use of the [GitLab external status checks API](https://docs.gitlab.com/ee/api/status_checks.html) to communicate to GitLab that the MR is approved. This is a two-step process:\n\n1. The first step is to get the ID of the external status check you need to approve. Here is an example of how to invoke the GitLab API to do this:\n\n> curl --request GET --header \"PRIVATE-TOKEN: \u003Creplace with your GitLab API token>\" \"https://gitlab.com/api/v4/projects/28933616/merge_requests/1/status_checks\"\n\nAn example of what the command above will return follows:\n\n> [{\"id\":86,\"name\":\"compliance-check\",\"external_url\":\"https://tech-marketing-sandbox-cd-compvalidator.compliance.gitlabworkshops.io/validate\",\"status\":\"pending\"}]\n\nThe example return value above shows that the ID of the external status check that we’d like to approve is 86.\n\n> **NOTE:** Although I'm showing an example of how to invoke the GitLab API above using the *curl* command, the idea is that your external system API call would carry out any checks and validation and then it would assemble this message in a REST HTTP call back to GitLab to communicate its approval of the merge request.\n\n2. Once you have the ID of the external status check, you can then approve it by using the GitLab API. Here’s an example:\n\n> curl --request POST --header \"PRIVATE-TOKEN: \u003Creplace with your GitLab API token>\" \"https://gitlab.com/api/v4/projects/28933616/merge_requests/1/status_check_responses?sha=\u003Creplace with SHA at HEAD of the source branch>&external_status_check_id=86\"\n\nExecuting the REST API call above will approve the external status check on the GitLab merge request.\n\n```\nNOTE: to obtain the \u003CSHA at HEAD of the source branch>, here’s an example of the command you’d need to execute:\n\n$ git ls-remote https://gitlab.com/tech-marketing/sandbox/cd/my-proj.git\n\nThe URL in the preceding line is the URL to the git project for your merge request. And here’s an example of the output of the preceding command:\n\nad1eeee497c99466797a1155f514d3c0c2f0cc45\tHEAD\n9e209c8d409a0867c1df4e0965aa675277176137\trefs/heads/1-external-status-check-demo\nad1eeee497c99466797a1155f514d3c0c2f0cc45\trefs/heads/master\n9e209c8d409a0867c1df4e0965aa675277176137\trefs/merge-requests/1/head\n```\n\nIn the output above, the SHA for the feature branch associated with the merge request is *9e209c8d409a0867c1df4e0965aa675277176137*.\n\n
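Putting the two steps together, here is a minimal shell sketch of the callback an external system might run once its own validation has passed. This is a sketch, not a reference implementation: the project ID, merge request IID, and token are placeholders, `jq` is assumed to be available, and the source branch SHA is read from the merge request API (its `sha` field) rather than from `git ls-remote`:\n\n```bash\n#!/usr/bin/env bash\n# Hypothetical values - substitute your own.\nPROJECT_ID=28933616\nMR_IID=1\nTOKEN=\"\u003Cyour GitLab API token>\"\nAPI=\"https://gitlab.com/api/v4/projects/${PROJECT_ID}/merge_requests/${MR_IID}\"\n\n# Step 1: look up the ID of the pending external status check.\nCHECK_ID=$(curl --silent --header \"PRIVATE-TOKEN: ${TOKEN}\" \"${API}/status_checks\" | jq '.[0].id')\n\n# The SHA at HEAD of the source branch, as reported by the merge request.\nSHA=$(curl --silent --header \"PRIVATE-TOKEN: ${TOKEN}\" \"${API}\" | jq -r '.sha')\n\n# Step 2: report the check as passed.\ncurl --request POST --header \"PRIVATE-TOKEN: ${TOKEN}\" \"${API}/status_check_responses?sha=${SHA}&external_status_check_id=${CHECK_ID}\"\n```\n\n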
## What we've learned\n\nGitLab recently introduced \"external status checks for merge requests,\" which are effectively API calls to systems/applications that sit outside GitLab. As you can see, with external status checks for merge requests, we were able to integrate GitLab with a third-party system that required manual approval for a merge request, ensuring that your application updates meet compliance and audit requirements.\n\nFor a demo of this feature in action, watch the video below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/v4iY8qMvFLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n",[232,727,894],{"slug":3429,"featured":6,"template":678},"how-to-status-checks","content:en-us:blog:how-to-status-checks.yml","How To Status Checks","en-us/blog/how-to-status-checks.yml","en-us/blog/how-to-status-checks",{"_path":3435,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3436,"content":3442,"config":3448,"_id":3450,"_type":16,"title":3451,"_source":17,"_file":3452,"_stem":3453,"_extension":20},"/en-us/blog/why-we-spent-the-last-month-eliminating-postgresql-subtransactions",{"title":3437,"description":3438,"ogTitle":3437,"ogDescription":3438,"noIndex":6,"ogImage":3439,"ogUrl":3440,"ogSiteName":692,"ogType":693,"canonicalUrls":3440,"schema":3441},"Why we spent the last month eliminating PostgreSQL subtransactions","How a mysterious stall in database queries uncovered a performance limitation with PostgreSQL.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669470/Blog/Hero%20Images/nessie.jpg","https://about.gitlab.com/blog/why-we-spent-the-last-month-eliminating-postgresql-subtransactions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we spent the last month eliminating PostgreSQL subtransactions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grzegorz Bizon\"},{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2021-09-29\",\n      }",{"title":3437,"description":3438,"authors":3443,"heroImage":3439,"date":3445,"body":3446,"category":14,"tags":3447},[3444,670],"Grzegorz Bizon","2021-09-29","\nStarting last June, we noticed the database on GitLab.com would\nmysteriously stall for minutes, which would lead to users seeing 500\nerrors during this time. Through a painstaking investigation over\nseveral weeks, we finally uncovered the cause of this: initiating a\nsubtransaction via the [`SAVEPOINT` SQL query](https://www.postgresql.org/docs/current/sql-savepoint.html) while\na long transaction is in progress can wreak havoc on database\nreplicas. Thus launched a race, which we recently completed, to\neliminate all `SAVEPOINT` queries from our code. 
Here's what happened,\nhow we discovered the problem, and what we did to fix it.\n\n### The symptoms begin\n\nOn June 24th, we noticed that our CI/CD runners service reported a high\nerror rate:\n\n![runners errors](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/ci-runners-errors.png)\n\nA quick investigation revealed that database queries used to retrieve\nCI/CD builds data were timing out and that the unprocessed builds\nbacklog grew at a high rate:\n\n![builds queue](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/builds-queue.png)\n\nOur monitoring also showed that some of the SQL queries were waiting for\nPostgreSQL lightweight locks (`LWLocks`):\n\n![aggregated lwlocks](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/aggregated-lwlocks.png)\n\nIn the following weeks, we experienced a few incidents like this. We were\nsurprised to see how sudden these performance degradations were, and how\nquickly things could go back to normal:\n\n![ci queries latency](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/ci-queries-latency.png)\n\n### Introducing Nessie: Stalled database queries\n\nIn order to learn more, we extended our observability tooling [to sample\nmore data from `pg_stat_activity`](https://gitlab.com/gitlab-cookbooks/gitlab-exporters/-/merge_requests/231). In PostgreSQL, the `pg_stat_activity`\nvirtual table contains the list of all database connections in the system as\nwell as what they are waiting for, such as a SQL query from the\nclient. We observed a consistent pattern: the queries were waiting on\n`SubtransControlLock`. The graph below shows the URLs or jobs that were\nstalled:\n\n![endpoints locked](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/endpoints-locked.png)\n\nThe purple line shows the sampled number of transactions locked by\n`SubtransControlLock` for the `POST /api/v4/jobs/request` endpoint that\nwe use for internal communication between GitLab and GitLab Runners\nprocessing CI/CD jobs.\n\nAlthough this endpoint was impacted the most, the whole database cluster\nappeared to be affected as many other, unrelated queries timed out.\n\nThis same pattern would rear its head on random days. A week would pass\nby without incident, and then it would show up for 15 minutes and\ndisappear for days. Were we chasing the Loch Ness Monster?\n\nLet's call these stalled queries Nessie for fun and profit.\n\n
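If you want to check whether your own database is showing the same symptom, you can sample wait events straight from `pg_stat_activity`. A minimal sketch, not the original incident tooling; the database name is a placeholder, and the wait event appears as `SubtransControlLock` before PostgreSQL 13 and `SubtransSLRU` from PostgreSQL 13 onwards:\n\n```bash\n# Count backends by wait event; a pile-up on SubtransControlLock\n# (SubtransSLRU on PostgreSQL 13+) is the signature described above.\npsql -d gitlabhq_production -c \"SELECT wait_event_type, wait_event, count(*) FROM pg_stat_activity GROUP BY 1, 2 ORDER BY 3 DESC;\"\n```\n\n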
### What is a `SAVEPOINT`?\n\nTo understand `SubtransControlLock` ([PostgreSQL\n13](https://www.postgresql.org/docs/13/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW)\nrenamed this to `SubtransSLRU`), we first must understand how\nsubtransactions work in PostgreSQL. In PostgreSQL, a transaction can\nstart via a `BEGIN` statement, and a subtransaction can be started with\na subsequent `SAVEPOINT` query. PostgreSQL assigns each of these a\ntransaction ID (XID for short) [when a transaction or a subtransaction\nneeds one, usually before a client modifies data](https://gitlab.com/postgres/postgres/blob/a00c138b78521b9bc68b480490a8d601ecdeb816/src/backend/access/transam/README#L193-L198).\n\n#### Why would you use a `SAVEPOINT`?\n\nFor example, let's say you were running an online store and a customer\nplaced an order. Before the order is fulfilled, the system needs to\nensure a credit card account exists for that user. In Rails, a common\npattern is to start a transaction for the order and call\n[`find_or_create_by`](https://apidock.com/rails/v5.2.3/ActiveRecord/Relation/find_or_create_by). For\nexample:\n\n```ruby\nOrder.transaction do\n  begin\n    CreditAccount.transaction(requires_new: true) do\n      CreditAccount.find_or_create_by(customer_id: customer.id)\n    end\n  rescue ActiveRecord::RecordNotUnique\n    retry\n  end\n  # Fulfill the order\n  # ...\nend\n```\n\nIf two orders were placed around the same time, you wouldn't want the\ncreation of a duplicate account to fail one of the orders. Instead, you\nwould want the system to say, \"Oh, an account was just created; let me\nuse that.\"\n\nThat's where subtransactions come in handy: the `requires_new: true`\ntells Rails to start a new subtransaction if the application already is\nin a transaction. The code above translates into several SQL calls that\nlook something like:\n```sql\n--- Start a transaction\nBEGIN\nSAVEPOINT active_record_1\n--- Look up the account\nSELECT * FROM credit_accounts WHERE customer_id = 1\n--- Insert the account; this may fail due to a duplicate constraint\nINSERT INTO credit_accounts (customer_id) VALUES (1)\n--- Abort this by rolling back\nROLLBACK TO active_record_1\n--- Retry here: Start a new subtransaction\nSAVEPOINT active_record_2\n--- Find the newly-created account\nSELECT * FROM credit_accounts WHERE customer_id = 1\n--- Save the data\nRELEASE SAVEPOINT active_record_2\nCOMMIT\n```\n\nOn line 7 above, the `INSERT` might fail if the customer account was\nalready created, and the database unique constraint would prevent a\nduplicate entry. Without the first `SAVEPOINT` and `ROLLBACK` block, the\nwhole transaction would have failed. With that subtransaction, the\ntransaction can retry gracefully and look up the existing account.\n\n### What is `SubtransControlLock`?\n\nAs we mentioned earlier, Nessie returned at random times with queries\nwaiting for `SubtransControlLock`. `SubtransControlLock` indicates that\nthe query is waiting for PostgreSQL to load subtransaction data from\ndisk into shared memory.\n\nWhy is this data needed? When a client runs a `SELECT`, for example,\nPostgreSQL needs to decide whether each version of a row, known as a\ntuple, is actually visible within the current transaction. It's possible\nthat a tuple has been deleted or has yet to be committed by another\ntransaction. Since only a top-level transaction can actually commit\ndata, PostgreSQL needs to map a subtransaction ID (subXID) to its parent\nXID.\n\nThis mapping of subXID to parent XID is stored on disk in the\n`pg_subtrans` directory. Since reading from disk is slow, PostgreSQL\nadds a simple least-recently used (SLRU) cache in front for each\nbackend process. The lookup is fast if the desired page is already\ncached. However, as [Laurenz Albe discussed in his blog\npost](https://www.cybertec-postgresql.com/en/subtransactions-and-performance-in-postgresql/),\nPostgreSQL may need to read from disk if the number of active\nsubtransactions exceeds 64 in a given transaction, a condition\nPostgreSQL terms `suboverflow`. Think of it as the feeling you might get\nif you ate too many Subway sandwiches.\n\nSuboverflowing (is that a word?) 
can bog down performance because as\nLaurenz said, \"Other transactions have to update `pg_subtrans` to\nregister subtransactions, and you can see in the perf output how they\nvie for lightweight locks with the readers.\"\n\n### Hunting for nested subtransactions\n\nLaurenz's blog post suggested that we might be using too many\nsubtransactions in one transaction. At first, we suspected we might be\ndoing this in some of our expensive background jobs, such as project\nexport or import. However, while we did see numerous `SAVEPOINT` calls\nin these jobs, we didn't see an unusual degree of nesting in local\ntesting.\n\nTo isolate the cause, we started by [adding Prometheus metrics to track\nsubtransactions as a Prometheus metric by model](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/66477).\nThis led to nice graphs like the following:\n\n![subtransactions plot](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/subtransactions-plot.png)\n\nWhile this was helpful in seeing the rate of subtransactions over time,\nwe didn't see any obvious spikes that occurred around the time of the\ndatabase stalls. Still, it was possible that suboverflow was happening.\n\nTo see if that was happening, we [instrumented our application to track\nsubtransactions and log a message whenever we detected more than 32\n`SAVEPOINT` calls in a given transaction](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67918). Rails\nmakes it possible for the application to subscribe to all of its SQL\nqueries via `ActiveSupport` notifications. Our instrumentation looked\nsomething like this, simplified for the purposes of discussion:\n\n```ruby\nActiveSupport::Notifications.subscribe('sql.active_record') do |event|\n  sql = event.payload.dig(:sql).to_s\n  connection = event.payload[:connection]\n  manager = connection&.transaction_manager\n\n  context = manager&.transaction_context\n  return if context.nil?\n\n  if sql.start_with?('BEGIN')\n    context.set_depth(0)\n  elsif sql.start_with?('SAVEPOINT', 'EXCEPTION')\n    context.increment_savepoints\n  elsif sql.start_with?('ROLLBACK TO SAVEPOINT')\n    context.increment_rollbacks\n  elsif sql.start_with?('RELEASE SAVEPOINT')\n    context.increment_releases\n  elsif sql.start_with?('COMMIT', 'ROLLBACK')\n    context.finish_transaction\n  end\nend\n```\n\nThis code looks for the key SQL commands that initiate transactions and\nsubtransactions and increments counters when they occur. After a\n`COMMIT`, we log a JSON message that contains the backtrace and the\nnumber of `SAVEPOINT` and `RELEASE SAVEPOINT` calls. 
For example:\n\n```json\n{\n  \"sql\": \"/*application:web,correlation_id:01FEBFH1YTMSFEEHS57FA8C6JX,endpoint_id:POST /api/:version/projects/:id/merge_requests/:merge_request_iid/approve*/ BEGIN\",\n  \"savepoints_count\": 1,\n  \"savepoint_backtraces\": [\n    [\n      \"app/models/application_record.rb:75:in `block in safe_find_or_create_by'\",\n      \"app/models/application_record.rb:75:in `safe_find_or_create_by'\",\n      \"app/models/merge_request.rb:1859:in `ensure_metrics'\",\n      \"ee/lib/analytics/merge_request_metrics_refresh.rb:11:in `block in execute'\",\n      \"ee/lib/analytics/merge_request_metrics_refresh.rb:10:in `each'\",\n      \"ee/lib/analytics/merge_request_metrics_refresh.rb:10:in `execute'\",\n      \"ee/app/services/ee/merge_requests/approval_service.rb:57:in `calculate_approvals_metrics'\",\n      \"ee/app/services/ee/merge_requests/approval_service.rb:45:in `block in create_event'\",\n      \"ee/app/services/ee/merge_requests/approval_service.rb:43:in `create_event'\",\n      \"app/services/merge_requests/approval_service.rb:13:in `execute'\",\n      \"ee/app/services/ee/merge_requests/approval_service.rb:14:in `execute'\",\n      \"lib/api/merge_request_approvals.rb:58:in `block (3 levels) in \u003Cclass:MergeRequestApprovals>'\"\n    ]\n  ],\n  \"rollbacks_count\": 0,\n  \"releases_count\": 1\n}\n```\n\nThis log message contains not only the number of subtransactions via\n`savepoints_count`, but also a handy backtrace that\nidentifies the exact source of the problem. The `sql` field also\ncontains [Marginalia comments](https://github.com/basecamp/marginalia)\nthat we tack onto every SQL query. These comments make it possible to\nidentify what HTTP request initiated the SQL query.\n\n### Taking a hard look at PostgreSQL\n\nThe new instrumentation showed that while the application regularly used\nsubtransactions, it never exceeded 10 nested `SAVEPOINT` calls.\n\nMeanwhile, [Nikolay Samokhvalov](https://gitlab.com/NikolayS), founder\nof [Postgres.ai](https://postgres.ai/), performed a battery of tests [trying to replicate the problem](https://gitlab.com/postgres-ai/postgresql-consulting/tests-and-benchmarks/-/issues/20).\nHe replicated Laurenz's results when a single transaction exceeded 64\nsubtransactions, but that wasn't happening here.\n\nWhen the database stalls occurred, we observed a number of patterns:\n\n1. Only the replicas were affected; the primary remained unaffected.\n1. There was a long-running transaction, usually relating to\nPostgreSQL's autovacuuming, at the time. The stalls stopped quickly after the transaction ended.\n\nWhy would this matter? Analyzing the PostgreSQL source code, Senior\nSupport Engineer [Catalin Irimie](https://gitlab.com/cat) [posed an\nintriguing question that led to a breakthrough in our understanding](https://gitlab.com/gitlab-org/gitlab/-/issues/338410#note_652056284):\n\n> Does this mean that, having subtransactions spanning more than 32 cache pages, concurrently, would trigger the exclusive SubtransControlLock because we still end up reading them from the disk?\n\n### Reproducing the problem with replicas\n\nTo answer this, Nikolay immediately modified his test [to involve replicas and long-running transactions](https://gitlab.com/postgres-ai/postgresql-consulting/tests-and-benchmarks/-/issues/21#note_653453774). 
Within a day, he reproduced the problem:\n\n![Nikolay experiment](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/nikolay-experiment.png)\n\nThe image above shows that transaction rates remained steady around\n360,000 transactions per second (TPS). Everything was proceeding fine\nuntil the long-running transaction started on the primary. Then suddenly\nthe transaction rates plummeted to 50,000 TPS on the replicas. Canceling\nthe long transaction immediately caused the transaction rate to return.\n\n### What is going on here?\n\nIn his blog post, Nikolay called the problem [Subtrans SLRU overflow](https://v2.postgres.ai/blog/20210831-postgresql-subtransactions-considered-harmful#problem-4-subtrans-slru-overflow).\nIn a busy database, it's possible for the size of the subtransaction log\nto grow so large that the working set no longer fits into memory. This\nresults in a lot of cache misses, which in turn causes a high amount of\ndisk I/O and CPU as PostgreSQL furiously tries to load data from disk to\nkeep up with all the lookups.\n\nAs mentioned earlier, the subtransaction cache holds a mapping of the\nsubXID to the parent XID. When PostgreSQL needs to look up the subXID,\nit calculates in which memory page this ID would live, and then does a\nlinear search to find it in the memory page. If the page is not in the\ncache, it evicts one page and loads the desired one into memory. The\ndiagram below shows the memory layout of the subtransaction SLRU.\n\n![Subtrans SLRU](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/subtrans-slru.png)\n\nBy default, each SLRU page is an 8K buffer holding 4-byte parent\nXIDs. This means 8192/4 = 2048 transaction IDs can be stored in each\npage.\n\nNote that there may be gaps in each page. PostgreSQL will cache XIDs as\nneeded, so a single XID can occupy an entire page.\n\nThere are 32 (`NUM_SUBTRANS_BUFFERS`) pages, which means up to 65K\ntransaction IDs can be stored in memory. Nikolay demonstrated that in a\nbusy system, it took about 18 seconds to fill up all 65K entries. Then\nperformance dropped off a cliff, making the database replicas unusable.\n\nTo our surprise, our experiments also demonstrated that a single\n`SAVEPOINT` during a long transaction [could initiate this problem if\nmany writes also occurred simultaneously](https://gitlab.com/gitlab-org/gitlab/-/issues/338865#note_655312474). That\nis, it wasn't enough just to reduce the frequency of `SAVEPOINT`; we had\nto eliminate them completely.\n\n#### Why does a single `SAVEPOINT` cause problems?\n\nTo answer this question, we need to understand what happens when a\n`SAVEPOINT` occurs in one query while a long-running transaction is\nrunning.\n\nWe mentioned earlier that PostgreSQL needs to decide whether a given row\nis visible to support a feature called [multi-version concurrency control](https://www.postgresql.org/docs/current/mvcc.html), or MVCC for\nshort. It does this by storing hidden columns, `xmin` and `xmax`, in\neach tuple.\n\n`xmin` holds the XID of when the tuple was created, and `xmax` holds the\nXID when it was marked as dead (0 if the row is still present).\n\n
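These hidden columns can be inspected directly, which helps when reasoning about visibility. A minimal sketch, reusing the hypothetical `credit_accounts` table from the earlier example (the database name is a placeholder):\n\n```bash\n# xmin/xmax are system columns, so they are not included in \"*\" and\n# must be selected explicitly.\npsql -d shop_development -c \"SELECT xmin, xmax, customer_id FROM credit_accounts LIMIT 5;\"\n```\n\n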
In addition, at the beginning of a transaction, PostgreSQL records metadata\nin a database snapshot. Among other items, this snapshot records the\noldest XID and the newest XID in its own `xmin` and `xmax` values.\n\nThis metadata helps [PostgreSQL determine whether a tuple is visible](https://www.interdb.jp/pg/pgsql05.html).\nFor example, a committed XID that started before `xmin` is definitely\nvisible, while anything after `xmax` is invisible.\n\n### What does this have to do with long transactions?\n\nLong transactions are bad in general because they can tie up\nconnections, but they can cause a subtly different problem on a\nreplica. On the replica, a single `SAVEPOINT` during a long transaction\ncauses a snapshot to suboverflow. Remember that suboverflow dragged down performance\nin the case where we had more than 64 subtransactions.\n\nFundamentally, the problem happens because a replica behaves differently\nfrom a primary when creating snapshots and checking for tuple\nvisibility. The diagram below illustrates an example with some of the\ndata structures used in PostgreSQL:\n\n![Diagram of subtransaction handling in replicas](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/pg-replica-subtransaction-diagram.png)\n\nOn the top of this diagram, we can see the XIDs increase at the\nbeginning of a subtransaction: the `INSERT` after the `BEGIN` gets 1,\nand the subsequent `INSERT` inside the `SAVEPOINT` gets 2. Another client comes\nalong and performs an `INSERT` and `SELECT` at XID 3.\n\nOn the primary, PostgreSQL stores the transactions in progress in a\nshared memory segment. The process array (`procarray`) stores XID 1 with\nthe first connection, and the database also writes that information to\nthe `pg_xact` directory. XID 2 gets stored in the `pg_subtrans`\ndirectory, mapped to its parent, XID 1.\n\nIf a read happens on the primary, the snapshot generated contains `xmin`\nas 1, and `xmax` as 3. `txip` holds a list of transactions in progress,\nand `subxip` holds a list of subtransactions in progress.\n\nHowever, neither the `procarray` nor the snapshot are shared directly\nwith the replica. The replica receives all the data it needs from the\nwrite-ahead log (WAL).\n\nPlaying the WAL back one entry at a time, the replica populates a shared data\nstructure called `KnownAssignedXids`. It contains all the transactions in\nprogress on the primary. Since this structure can only hold a limited number of\nIDs, a busy database with a lot of active subtransactions could easily fill\nthis buffer. PostgreSQL made a design choice to kick out all subXIDs from this\nlist and store them in the `pg_subtrans` directory.\n\nWhen a snapshot is generated on the replica, notice how `txip` is\nblank. A PostgreSQL replica treats **all** XIDs as though they are\nsubtransactions and throws them into the `subxip` bucket. That works\nbecause if an XID has a parent XID, then it's a subtransaction. Otherwise, it's a normal transaction. [The code comments\nexplain the rationale](https://gitlab.com/postgres/postgres/blob/9f540f840665936132dd30bd8e58e9a67e648f22/src/backend/storage/ipc/procarray.c#L1665-L1681).\n\nHowever, this means the snapshot is missing subXIDs, and that could be\nbad for MVCC. To deal with that, the [replica also updates `lastOverflowedXid`](https://gitlab.com/postgres/postgres/blob/9f540f840665936132dd30bd8e58e9a67e648f22/src/backend/storage/ipc/procarray.c#L3176-L3182):\n\n```c\n * When we throw away subXIDs from KnownAssignedXids, we need to keep track of\n * that, similarly to tracking overflow of a PGPROC's subxids array.  
We do\n * that by remembering the lastOverflowedXID, ie the last thrown-away subXID.\n * As long as that is within the range of interesting XIDs, we have to assume\n * that subXIDs are missing from snapshots.  (Note that subXID overflow occurs\n * on primary when 65th subXID arrives, whereas on standby it occurs when 64th\n * subXID arrives - that is not an error.)\n```\n\nWhat is this \"range of interesting XIDs\"? We can see this in [the code below](https://gitlab.com/postgres/postgres/blob/4bf0bce161097869be5a56706b31388ba15e0113/src/backend/storage/ipc/procarray.c#L1702-L1703):\n\n```c\nif (TransactionIdPrecedesOrEquals(xmin, procArray->lastOverflowedXid))\n    suboverflowed = true;\n```\n\nIf `lastOverflowedXid` is smaller than our snapshot's `xmin`, it means\nthat all subtransactions have completed, so we don't need to check for\nsubtransactions. However, in our example:\n\n1. `xmin` is 1 because of the transaction.\n2. `lastOverflowedXid` is 2 because of the `SAVEPOINT`.\n\nThis means `suboverflowed` is set to `true` here, which tells PostgreSQL\nthat whenever an XID needs to be checked, check to see if it has a parent\nXID. Remember that this causes PostgreSQL to:\n\n1. Look up the parent XID for the subXID in the SLRU cache.\n1. If this doesn't exist in the cache, fetch the data from `pg_subtrans`.\n\nIn a busy system, the requested XIDs could span an ever-growing range of\nvalues, which could easily exhaust the 65K entries in the SLRU\ncache. This range will continue to grow as long as the transaction runs;\nthe rate of increase depends on how many updates are happening on the\nprimary. As soon as the transaction terminates, the `suboverflowed` state\ngets set to `false`.\n\nIn other words, we've replicated the same conditions as we saw with 64\nsubtransactions, only with a single `SAVEPOINT` and a long transaction.\n\n### What can we do about getting rid of Nessie?\n\nThere are three options:\n\n1. Eliminate `SAVEPOINT` calls completely.\n1. Eliminate all long-running transactions.\n1. Apply [Andrey Borodin's patches to PostgreSQL and increase the subtransaction cache](https://www.postgresql.org/message-id/flat/494C5E7F-E410-48FA-A93E-F7723D859561%40yandex-team.ru#18c79477bf7fc44a3ac3d1ce55e4c169).\n\nWe chose the first option because most uses of subtransactions could be\nremoved fairly easily. There were a [number of approaches](https://gitlab.com/groups/gitlab-org/-/epics/6540) we took:\n\n1. Perform updates outside of a subtransaction. Examples: [1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68471), [2](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68690)\n1. Rewrite a query to use an `INSERT` or an `UPDATE` with an `ON CONFLICT` clause to deal with duplicate constraint violations (see the sketch after this list). Examples: [1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68433), [2](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69240), [3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68509)\n1. Live with a non-atomic `find_or_create_by`. We used this approach sparingly. Example: [1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68649)\n\n
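To make the second approach concrete, here is a minimal sketch of the upsert pattern, using the hypothetical `credit_accounts` table from earlier rather than the exact queries from the linked merge requests; it assumes a unique index on `customer_id`:\n\n```bash\n# The duplicate is handled inside a single statement, so no\n# subtransaction (SAVEPOINT) is needed to survive the conflict.\npsql -d shop_development -c \"INSERT INTO credit_accounts (customer_id) VALUES (1) ON CONFLICT (customer_id) DO NOTHING;\"\n```\n\n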
In addition, we added [an alert whenever the application used a single `SAVEPOINT`](https://gitlab.com/gitlab-com/runbooks/-/merge_requests/3881):\n\n![subtransaction alert](https://about.gitlab.com/images/blogimages/postgresql-subtransactions/subtransactions-alert-example.png)\n\nThis had the side benefit of flagging a [minor bug](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/70889).\n\n#### Why not eliminate all long-running transactions?\n\nIn our database, it wasn't practical to eliminate all long-running\ntransactions because we think many of them happened via [database\nautovacuuming](https://www.postgresql.org/docs/current/runtime-config-autovacuum.html),\nbut [we're not able to reproduce this yet](https://gitlab.com/postgres-ai/postgresql-consulting/tests-and-benchmarks/-/issues/21#note_669698320).\nWe are working on partitioning the tables and sharding the database, but this is a much more time-consuming problem\nthan removing all subtransactions.\n\n#### What about the PostgreSQL patches?\n\nAlthough we tested Andrey's PostgreSQL patches, we did not feel comfortable\ndeviating from the official PostgreSQL releases. Plus, maintaining a\ncustom patched release over upgrades would add a significant maintenance\nburden for our infrastructure team. Our self-managed customers would\nalso not benefit unless they used a patched database.\n\nAndrey's patches do two main things:\n\n1. Allow administrators to change the SLRU size to any value.\n1. Add an [associative cache](https://www.youtube.com/watch?v=A0vR-ks3hsQ)\nto make it performant to use a large cache value.\n\nRemember that the SLRU cache does a linear search for the desired\npage. That works fine when there are only 32 pages to search, but if you\nincrease the cache size to 100 MB the search becomes much more\nexpensive. The associative cache makes the lookup fast by indexing pages\nwith a bitmask and looking up the entry with offsets from the remaining\nbits. This mitigates the problem because a transaction would need to be\nseveral orders of magnitude longer to cause a problem.\n\nNikolay demonstrated that the `SAVEPOINT` problem disappeared as soon as\nwe increased the SLRU size to 100 MB with those patches. With a 100 MB\ncache, PostgreSQL can cache 26.2 million IDs (104857600/4), far more\nthan the measly 65K.\n\nThese [patches are currently awaiting review](https://postgres.ai/blog/20210831-postgresql-subtransactions-considered-harmful#ideas-for-postgresql-development),\nbut in our opinion they should be given high priority for PostgreSQL 15.\n\n### Conclusion\n\nSince removing all `SAVEPOINT` queries, we have not seen Nessie rear her\nhead again. 
If you are running PostgreSQL with read replicas, we\nstrongly recommend that you also remove *all* subtransactions until\nfurther notice.\n\nPostgreSQL is a fantastic database, and its well-commented code makes it\npossible to understand its limitations under different configurations.\n\nWe would like to thank the GitLab community for bearing with us while we\nironed out this production issue.\n\nWe are also grateful for the support from [Nikolay\nSamokhvalov](https://gitlab.com/NikolayS) and [Catalin\nIrimie](https://gitlab.com/cat), who contributed to understanding where our\nLoch Ness Monster was hiding.\n\nCover image by [Khadi Ganiev](https://www.istockphoto.com/portfolio/Ganiev?mediatype=photography) on [iStock](https://istock.com), licensed under [standard license](https://www.istockphoto.com/legal/license-agreement)\n",[704,1445,1979],{"slug":3449,"featured":6,"template":678},"why-we-spent-the-last-month-eliminating-postgresql-subtransactions","content:en-us:blog:why-we-spent-the-last-month-eliminating-postgresql-subtransactions.yml","Why We Spent The Last Month Eliminating Postgresql Subtransactions","en-us/blog/why-we-spent-the-last-month-eliminating-postgresql-subtransactions.yml","en-us/blog/why-we-spent-the-last-month-eliminating-postgresql-subtransactions",{"_path":3455,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3456,"content":3462,"config":3466,"_id":3468,"_type":16,"title":3469,"_source":17,"_file":3470,"_stem":3471,"_extension":20},"/en-us/blog/how-to-configure-sidekiq-for-gitlab-at-scale",{"title":3457,"description":3458,"ogTitle":3457,"ogDescription":3458,"noIndex":6,"ogImage":3459,"ogUrl":3460,"ogSiteName":692,"ogType":693,"canonicalUrls":3460,"schema":3461},"How to configure Sidekiq for specialized or large-scale GitLab instances","This tutorial unpacks how to configure Sidekiq to suit your GitLab deployment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667068/Blog/Hero%20Images/sidekiqmountain.jpg","https://about.gitlab.com/blog/how-to-configure-sidekiq-for-gitlab-at-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to configure Sidekiq for specialized or large-scale GitLab instances\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Craig Miskell\"}],\n        \"datePublished\": \"2021-09-27\",\n      }",{"title":3457,"description":3458,"authors":3463,"heroImage":3459,"date":3464,"body":3465,"category":14},[1463],"2021-09-27","\nConfiguring Sidekiq in your own deployment of GitLab is a little complicated, but entirely possible. In this blog post, we explain how to set up Sidekiq for GitLab in special cases and at large scale, with some examples that may be useful to you.\n\n## Why consider special configuration?\n\nWhile Sidekiq (both in general, and in a GitLab deployment) will usually _just work_, there can be some sharp edges and limits. Raw scale is a clear and obvious driver for needing to take action, and although it may be fine to simply scale out multiple Sidekiq nodes each listening to all the queues, at some point:\n\n1. The uniqueness of workload distribution and job characteristics may require dedicated workers, either sharded on job attributes (as for GitLab.com) or dedicated to specific workers (based on your workloads), or\n1. 
Simple saturation on Redis means you need to listen to fewer queues\n\n**[We share [all we learned about configuring Sidekiq on GitLab.com](/blog/specialized-sidekiq-configuration-lessons-from-gitlab-dot-com/)]**\n\n### Example: Demo systems\n\nIn early 2021, our Demo Systems team was running a GitLab deployment for training purposes. Many users would join a training session where the first task was to import a sample project into the provided GitLab instance to work on further during the class. Imports are implemented with a Sidekiq job because they can take anything from a few seconds to hours. What the Demo Systems team found was that the default Sidekiq configuration simply couldn't keep up. The deployment wasn't huge, and neither was the user count; it was the very specific usage of the system that ran into difficulties. So, the team split off a dedicated Sidekiq VM for running imports, with suitably tuned concurrency (based on CPU contention), CPU + memory, and number of workers.\n\n**[[Discover how we scaled our use of Sidekiq on a GitLab instance](/blog/scaling-our-use-of-sidekiq/)]**\n\nThe key lesson here is that large scale isn't always the driver for customizing Sidekiq configuration; the reason may be specific to your workloads, which means you first have to be able to identify the pain points.\n\n### Using metrics to identify problems\n\n{: #using-metrics-to-identify-problems}\nUser experience may tell you something isn't going well, but how do you tell where the actual problem lies? The GitLab UI exposes the Sidekiq UI to administrators at `/admin/background_jobs` – in the 'Queues' tab, you can see how many jobs are currently pending, with a breakdown by queue. However, that is a point-in-time snapshot, and stored metrics/graphs are better for long-term visibility, particularly for figuring out what happened an hour ago when someone reported slow pipelines, or to debug that thing that happens twice a day but never when anyone is watching.\n\nTo get some visibility, consider installing [gitlab-exporter](https://gitlab.com/gitlab-org/gitlab-exporter/) on (or pointed to) your Redis nodes, with:\n\n* `probe_queues` enabled to get the `sidekiq_queue_size` metric, and/or\n* `probe_jobs_limit` to get `sidekiq_enqueued_jobs`.\n\n`sidekiq_queue_size` reports the length of all the Sidekiq queues in Redis (equivalent to the data exposed by the Sidekiq UI), but now it's exposed as a Prometheus metric for scraping and graphing. `sidekiq_enqueued_jobs` deserializes the job descriptions as well, meaning it can look inside a routing rule-based named queue with more than one class of jobs in it, and report the distribution of work by class. It has to limit (hence the name) the inspection to the first 1000 jobs in any given queue to contain the potential impact of blocking Redis with many calls to [LRANGE](https://redis.io/commands/lrange) with large responses. Usually this situation is fine. If you have > 1000 jobs in any given queue for a non-trivial amount of time, just knowing what's at the head of the queue is likely sufficient, and `sidekiq_queue_size` will still show you the full magnitude of the backlog.\n\nIf we were to really simplify it – because there are always exceptions – both those metrics should be at or close to 0 most of the time. In practice, there are often small, brief spikes when batches of work land and cannot be processed immediately, and it may be quite acceptable for some large/slow jobs to be queued for some significant time (e.g., project exports). 
But a prolonged backlog (or perpetual growth) indicates some class of work is not being processed, either at all, or \"fast enough\" to keep up. If your team is encountering these problems, it might be time to customize your Sidekiq configuration.\n\nHowever, the backlog in queues may not be the whole story – queuing might be occurring because all your Sidekiq workers are busy with long-running jobs, causing all the other jobs to stall. To observe that, you need the `sidekiq_running_jobs` metric, which can be scraped from the [sidekiq exporter](https://docs.gitlab.com/ee/administration/monitoring/prometheus/gitlab_metrics.html#sidekiq-metrics). This is enabled by default on port 8082 for Omnibus, and 3807 in Kubernetes when using our helmcharts. Graphing `sum by (worker) (sidekiq_running_jobs)` will show you what your Sidekiq workers are actively up to right now, and may highlight which worker/queue is causing the problem.\n\nConsider also keeping an eye on your Redis CPU usage – on a modern CPU at smaller scales there's a lot of headroom, but if you're at the point of considering a specialized Sidekiq configuration, now is the time to add a little monitoring and alerting so it doesn't sneak up on you in the future. We use [Process Exporter](https://github.com/ncabatoff/process-exporter) inspecting the `redis-server` process, with `threads=true` (on the command line) to get per-thread details. In Prometheus we use `sum by (threadname) (rate(namedprocess_namegroup_thread_cpu_seconds_total[1m]))`. On Redis 6, the core thread is named 'redis-server'. As always, set your alert level so that you won't get false positives, but will have plenty of headway before saturation becomes a real problem.\n\n### How to customize your Sidekiq configuration\n\nAfter identifying one or more queues/workers that are backed up, the main task is to get more Sidekiq processing power deployed. As mentioned above, it may be sufficient to simply add one or more [Sidekiq nodes](https://docs.gitlab.com/ee/administration/sidekiq/index.html) or a Sidekiq workload in Kubernetes, allowing you to listen to all the queues in a default configuration. If you choose this approach, make sure you're keeping an eye on Redis CPU per the [metrics](#using-metrics-to-identify-problems) above.\n\nAn alternative is to provision some dedicated Sidekiq processing for just the problem work. It could even be said that any complex configuration of Sidekiq for GitLab is just the result of a series of these decisions, progressively adding dedicated processing for specific workloads with a \"catchall\" or \"default\" workload picking up the rest, so I'll describe just one such step and you can take it as far as you need.\n\nThere is a critical decision to make first, and that's whether to:\n\n1. Use queue-selectors on the workers and continue with a queue per worker for all jobs, or\n1. Use routing rules.\n\nAnd if using routing rules, decide whether to:\n\n1. Go entirely to one-queue-per-shard, or\n1. Use a mix of custom-named queues and the default worker-named queues.\n\nHaving worked in this area for a little over a year now, **I strongly recommend using routing rules and one-queue-per-shard** for the following reasons:\n\n1. Routing rules are more obvious in their effect/ordering than trying to configure disjoint sets of queues across Sidekiq workloads,\n1. Correlating the target queue names in routing rules with the names of queues listened to by workers is simpler,\n1. 
There is *far* less complexity in configuring the default/catchall workers,\n1. Load on Redis is significantly reduced with fewer named queues.\n\nIt may be easier to see why with an example. In the next section, we assume that you want to provide dedicated resources for `project_export` because it sees heavy use, and Sidekiq is regularly spending all its time on that. We'll skip the early phase and assume that you have identified from metrics that the queue name is project_export.\n\n#### Using queue-selectors only\n\nLet's say you want to continue to use one queue per worker and configure each Sidekiq workload to listen to a subset of jobs using queue selectors. The syntax and location for configuring queue selectors are available in our documentation under the [Queue selector](https://docs.gitlab.com/ee/administration/sidekiq/extra_sidekiq_processes.html) and [Worker matcher query](https://docs.gitlab.com/ee/administration/sidekiq/processing_specific_job_classes.html) sections.\n\nAfter creating your new, dedicated Sidekiq workload, configure this in `gitlab.rb` on that workload:\n\n```ruby\nsidekiq['enable'] = true\nsidekiq['queue_selector'] = true\nsidekiq['queue_groups'] = [ 'name=project_export' ]\n```\n\nKeep in mind that this will only run one Sidekiq process which, while multithreaded with one job potentially executing on each thread, can only use one CPU – read up on [multiple processes](https://docs.gitlab.com/ee/administration/sidekiq/extra_sidekiq_processes.html) and [concurrency (threading)](https://about.gitlab.com/blog/specialized-sidekiq-configuration-lessons-from-gitlab-dot-com/) for a little more detail. In short, if you had a 4 CPU VM and you wanted to run 4 project_export processes, you'd configure gitlab.rb like this:\n\n```ruby\nsidekiq['enable'] = true\nsidekiq['queue_selector'] = true\nsidekiq['queue_groups'] = [ 'name=project_export', 'name=project_export', 'name=project_export', 'name=project_export' ]\n```\n\nThis also reveals another approach. If your existing workload is running somewhere with spare CPU, you could add processes with different sets of queues, gaining some control of workload prioritization without having to deploy new compute resources. For example:\n\n```ruby\nsidekiq['enable'] = true\nsidekiq['queue_selector'] = true\nsidekiq['queue_groups'] = [ 'name=project_export', 'name!=project_export' ]\n```\n\nThis may look a little odd at first glance, but it means that one process will be listening to `project_export`, and the other will be listening to every queue that _isn't_ project_export.\n\nA couple of caveats:\n\n1. Concurrency (threading) is set once in `gitlab.rb`, so all jobs running on that node will need to be compatible with that concurrency. Read up on [concurrency (threading) in the previous blog post](/blog/specialized-sidekiq-configuration-lessons-from-gitlab-dot-com/) to learn more.\n1. Using the GitLab helmcharts, each pod only runs one process, so there you'd adjust `maxReplicas` instead.\n\nSpeaking of helmcharts, these have the queue-selector configured with the [`queues`](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#queues) attribute of the pod:\n\n```yaml\nqueues: name=project_export\n```\n\nWhere, despite being named `queues`, it can take the full queue-selector expression.\n\nAfter these configurations, your new workload will be listening exclusively to the `project_export` queue/worker. But what is to stop your original workload from also running `project_export`? Absolutely nothing! 
A default/baseline workload of Sidekiq for GitLab will listen on all the queues. This **may** be acceptable in a simple case – you've provided additional capacity dedicated to the named queue, and occasionally those jobs will still run on the original Sidekiq. In practice, because of the way Sidekiq uses BRPOP with a randomized order of queues, and how Redis distributes work when clients are already waiting on a named queue, the new dedicated workload will most likely pick up the **vast** majority of the work on that queue. But this may not isolate problem work as much as you desire. This could also lead to difficulty in reasoning clearly about what the system is going to do as your customization grows and becomes more specific. Therefore, I strongly recommend that you ensure the sets of queues are disjoint (that is, no overlap). The final step is to configure your original/default Sidekiq with either:\n\n```ruby\nsidekiq['enable'] = true\nsidekiq['negate'] = true\nsidekiq['queue_selector'] = true\nsidekiq['queue_groups'] = [ 'name=project_export' ]\n```\n\nor\n\n```ruby\nsidekiq['enable'] = true\nsidekiq['queue_selector'] = true\nsidekiq['queue_groups'] = [ 'name!=project_export' ]\n```\n\nThen, as you add more customized workloads in future steps, you would extend the expression to exclude the work that is being picked up elsewhere. For example, in the negate case, if you had added a further workload executing only `feature_category=importers`:\n\n```ruby\nsidekiq['negate'] = true\nsidekiq['queue_groups'] = [ 'name=project_export&feature_category=importers' ]\n```\n\nThis is where setting `negate` to \"true\" can be better – this catchall/default expression can be a simple concatenation of the expressions used on every other workload, separated with `&`. The expression may end up complex, but it can be generated trivially with code. Not using negate and inverting the operators works for simple cases, but may run into difficulty expressing edge cases when the individual expressions become more nuanced or complicated.\n\n#### Using routing rules\n\nAnother option is to use [routing rules](https://docs.gitlab.com/ee/administration/sidekiq/processing_specific_job_classes.html) to achieve the same thing. First, add a new Sidekiq workload configured with:\n\n```ruby\nsidekiq['enable'] = true\nsidekiq['queue_selector'] = false # This is the default and is included only to be explicit\nsidekiq['queue_groups'] = [ 'export' ]\n```\n\nAs in the queue-selector approach, you can run more than one process by repeating the expression in `queue_groups`:\n\n```ruby\nsidekiq['queue_groups'] = [ 'export', 'export', 'export', 'export' ]\n```\n\nWhen using [helm charts](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/index.html#queues) it would simply be the following in the Sidekiq pod definition:\n\n```yaml\nqueues: export\n```\n\nThis is simply explicitly naming queues, but having made up an arbitrary name, \"export\", rather than using a queue name derived from the job class. Next, and most importantly, add the following to `gitlab.rb` on **all** your workloads. 
In the queue-selector approach, we only had to configure the Sidekiq workload, but here we need to ensure that **everywhere that enqueues Sidekiq jobs has the routing rules** – meaning anywhere running the Rails portion of GitLab, i.e., puma (web) as well as Sidekiq:\n\n```ruby\nsidekiq['routing_rules'] = [\n  ['name=project_export', 'export'],\n  ['*', nil]\n]\n```\n\nAnd when using a [helmcharts](https://docs.gitlab.com/charts/charts/globals.html#sidekiq-routing-rules-settings) deployment:\n\n```yaml\nglobal:\n  appConfig:\n    sidekiq:\n      routingRules:\n      - [\"name=project_export\", \"export\"]\n      - [\"*\", null]\n```\n\nSome caveats:\n\n1. You most likely want a workload listening to the new queue **before** reconfiguring the routing rules, otherwise jobs will be put into the queue with nothing ready to execute them.\n1. The destination name (`export`) is arbitrary, but must match exactly in the Sidekiq queue configuration and the routing rules.\n1. In `gitlab.rb` we use `nil`, but in YAML we must use `null`.\n\nBy using `null`/`nil` as the target for `*`, this example continues to use the default worker-per-queue strategy for all the other jobs. So you will have gained routing/prioritization control, but Redis will still be doing a lot of work to listen to the other 440+ queues. To avoid that, you can change the target of the final `*` routing rule to \"default\", e.g.:\n\n```ruby\nsidekiq['routing_rules'] = [\n  ['name=project_export', 'export'],\n  ['*', 'default']\n]\n```\n\nIn this context, \"default\" is literal. Conveniently, there is a built-in 'default' queue that GitLab Sidekiq listens to, although nothing uses it out of the box. These rules will route all remaining jobs to that queue, and the original/default Sidekiq workload will pick them up immediately. Then, at your convenience, you can reconfigure the original Sidekiq workload to listen **only** to \"default\" in the same way you configured the new workload to listen to \"export\", and gain the performance benefit in Redis.\n\n#### Edge cases\n\nThe routing rules example above is simplified slightly for clarity. In practice, there is still a small set of queues that need to remain in their **original** dedicated named queues for a variety of reasons. We're working on resolving the blockers, but that may take a while to work through. You can follow along in [this issue](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1087), or you can keep an eye on the routingRules [configuration for GitLab.com](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/blob/master/releases/gitlab/values/gprd.yaml.gotmpl) – special cases will be at the very top of the rules, routed by `worker_name` or `name`, and there will be a comment about why and a link to any related issues, which will help you determine if each is relevant to your needs. Some special cases may be there for GitLab.com-specific reasons and may not be generally applicable. In the long term, we expect the list of special cases to shrink, not grow.\n\nAlso consider that the special cases may be for features that you do not use. Specifically:\n\n1. EmailReceiverWorker & ServiceDeskEmailReceiverWorker are for [Incoming email](https://docs.gitlab.com/ee/administration/incoming_email.html)\n1. ProjectImportScheduleWorker is for project mirroring\n\nSo you might be able to just ignore them, or route them to a queue that no worker is listening to and alert if `sidekiq_queue_size` is above zero on those queues.
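\n\nAs an illustration, a hypothetical rule set that parks those two workers on an unwatched queue might look like the sketch below (the `unused` queue name is made up here, and the exact selector syntax for such special cases is best checked against the GitLab.com configuration linked above):\n\n```ruby\nsidekiq['routing_rules'] = [\n  # Special cases first: jobs for features we don't use go to a queue that\n  # nothing listens to; alerting on sidekiq_queue_size > 0 for 'unused'\n  # catches any surprises.\n  ['worker_name=EmailReceiverWorker,ServiceDeskEmailReceiverWorker', 'unused'],\n  ['name=project_export', 'export'],\n  ['*', 'default']\n]\n```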
\n\n### Migrating when using routing rules\n\nThere is one more thing to note. When migrating an active GitLab deployment (rather than configuring this from scratch on a fresh GitLab deployment), the order of steps taken is important, and there's one additional step I haven't mentioned yet:\n\n1. Ensure a Sidekiq workload is listening to the new queues\n1. Change the routing rules\n1. Run the Sidekiq job migration [Rake task](https://docs.gitlab.com/ee/administration/sidekiq/sidekiq_job_migration.html)\n   * Any jobs that are scheduled for the future will be migrated to the new queue for correct execution\n1. Stop listening to queues that are no longer in use\n\nThese steps will ensure a clean migration. If you skip step 3, deferred jobs will later be picked up out of their holding place in Redis and might be scheduled into a queue that no Sidekiq worker is listening to anymore. This is exactly the process we took on GitLab.com when migrating our configuration to one queue per shard.\n\n## Simplifying complex Sidekiq configurations\n\nAny complicated Sidekiq configuration can be broken down into a series of these individual migrations: identifying (using metrics) queues or workers that need specialized handling, spinning up a workload to run them, and then sending/routing the jobs to this new workload.\n\nCover image by [Jerry Zhang](https://unsplash.com/@z734923105) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",{"slug":3467,"featured":6,"template":678},"how-to-configure-sidekiq-for-gitlab-at-scale","content:en-us:blog:how-to-configure-sidekiq-for-gitlab-at-scale.yml","How To Configure Sidekiq For Gitlab At Scale","en-us/blog/how-to-configure-sidekiq-for-gitlab-at-scale.yml","en-us/blog/how-to-configure-sidekiq-for-gitlab-at-scale",{"_path":3473,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3474,"content":3479,"config":3486,"_id":3488,"_type":16,"title":3489,"_source":17,"_file":3490,"_stem":3491,"_extension":20},"/en-us/blog/best-practices-customer-feature-request",{"title":3475,"description":3476,"ogTitle":3475,"ogDescription":3476,"noIndex":6,"ogImage":2478,"ogUrl":3477,"ogSiteName":692,"ogType":693,"canonicalUrls":3477,"schema":3478},"How to incorporate private customer needs into a public product roadmap","We've had lots of experience documenting and tracking private customer feature requests effectively. 
Here's our best advice and how to get the most out of GitLab issues and issue trackers.","https://about.gitlab.com/blog/best-practices-customer-feature-request","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to incorporate private customer needs into a public product roadmap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"},{\"@type\":\"Person\",\"name\":\"Neil McCorrison\"}],\n        \"datePublished\": \"2021-09-23\",\n      }",{"title":3475,"description":3476,"authors":3480,"heroImage":2478,"date":3483,"body":3484,"category":14,"tags":3485},[3481,3482],"Christina Hupy, Ph.D.","Neil McCorrison","2021-09-23","\n\nEffectively communicating a customer’s private needs to product teams is essential to a product’s success, but it can be a tricky undertaking.\n\nTeams can face several challenges in communicating and tracking customers' requests, including protecting customer confidentiality, tracking priority and progress, and making sure the product team is getting actionable feedback that can be incorporated into product milestones.\n\nThis blog post shares GitLab's best practices and lessons learned, as well as a video conversation between GitLab CEO [Sid Sijbrandij](/company/team/#sytses) and Fleet CEO [Mike McNeil](https://www.linkedin.com/in/mikermcneil/).\n\nIn line with GitLab's [open core model](/company/stewardship/) and [transparency value](https://handbook.gitlab.com/handbook/values/#transparency), our product roadmap is public and the product team uses [public issue trackers](/gitlab-com/Product/-/issues) for feature requests and to plan the work. Because the issues are public, customers and community members can see how the product team works, what direction we are headed, and what the priorities are. Contributors can even decide to create a feature themselves.\n\nEver wonder what a DevOps Platform could do for your team? [Here's what you need to know](/solutions/devops-platform/)\n\nWhen a customer brings a feature request to a technical account manager (TAM), the manager searches for the relevant open feature request in the product teams' issue tracker and adds a comment with generic details about the customer, such as the number of users and the product. If an issue for that feature request does not already exist, the technical account manager can create an issue with the [Feature Proposal](https://gitlab.com/gitlab-org/gitlab/-/issues/new?issuable_template=Feature%20Proposal%20-%20lean) issue template and then add the customer’s request as a comment.\n\nFor example, the comment should include the following:\n\n> Hello `@product-manager`, an Ultimate customer with 1500 users (`salesforce-link`) would like to see this feature prioritized, ideally within the next 6 months. They need this feature in order to X, which is important to them because Y, and they do not currently have a workaround. Additionally, releasing this feature would result in an estimated 250 additional users.\n\nThe TAM includes a link to the account in the customer relationship management system (CRM), in GitLab’s case Salesforce, so the internal teams can view the details. We even have a [feedback template](/handbook/product/how-to-engage/#feedback-template) to ensure the proper details are captured in the comment. The comment is public but the record in the CRM is private.\n\nThe product manager reviews the request and responds. 
Relevant [labels](/handbook/customer-success/csm/product/#priority-of-feature-requests) are added based on priority. For example, labels include **critical requests**, **high-priority requests**, **low-priority requests**, or **promised to a customer**. Milestones can be assigned to track timelines and make sure the feature ships on time. The feature tracking issue should be maintained regularly and acts as the single source of truth on customer needs. These issues can also be reviewed for metrics on previously delivered feature requests.\n\n**Elevating your DevOps skills? Join us at [Commit at KubeCon - Oct. 11!](/events/commit/)**\n\nIn this case, a noisy feature request issue with comments from customers is a good thing. It helps the product manager directly see where the action is and how customers would benefit, and it also helps when prioritizing what feature ships next. Seeing direct input from the customers provides context and also creates developer empathy and connection with the end user. Additional team members, including [solution architects](https://handbook.gitlab.com/job-families/sales/solutions-architect/), find it useful to subscribe to these issues, keeping them automatically updated on progress and discussion by the product team.\n\n**Getting the product team involved early on is essential** to the success of this workflow. Another essential element is that the CSMs bring their customers' feedback directly to the issue where the work is being planned and prioritized.\n\n**Contributing to GitLab:**   Once a product manager has triaged an issue and applied the appropriate [Product Development Workflow](/handbook/product-development-flow/) labels, the feature may be deemed ready for the customer or community to help build directly. Our motto is \"Everyone Can Contribute\", and the ~\"Accepting Merge Requests\" label ([handbook](/handbook/engineering/quality/triage-operations/#sts=Accepting%20merge%20requests)) is a great way to identify when a feature is ready for a community contribution. Customers who wish to contribute back to GitLab can ask for a [Merge Request Coach](https://handbook.gitlab.com/job-families/expert/merge-request-coach/) to help guide them through the process to ensure timely review and alignment with our engineering best practices.\n\nGitLab learned early on that creating a separate issue for customer feedback can get complicated and ends up being disjointed from where the product managers are doing their work.\n\nIn summary, best practices for delivering customer feature requests to the product team include:\n\n* Ensure the feedback is directly where the product managers are working and prioritizing features.\n* Provide only generic details on the customer with a link to internal confidential information, but provide as much detail as possible regarding the customer's use case and business need.\n* Share the feature request issue back with the customer. If they feel inclined, they can comment and add details. This builds trust between the customer, their account team, and the product team.\n* Labels and milestones are essential for tracking. 
If something is critical to the customer, make sure the labels and milestones indicate as much.\n* The feature request issue should act as the single source of truth for the customers' needs; aggregating this information elsewhere results in a disconnect between the need and the work.\n\nWatch the full discussion between Sid and Mike:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/JH2cFhoUzsI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\nSid discussing GitLab's best practices on tracking customer feedback with Fleet CEO Mike McNeil\n{: .note}\n\n",[727,749,916,1347],{"slug":3487,"featured":6,"template":678},"best-practices-customer-feature-request","content:en-us:blog:best-practices-customer-feature-request.yml","Best Practices Customer Feature Request","en-us/blog/best-practices-customer-feature-request.yml","en-us/blog/best-practices-customer-feature-request",{"_path":3493,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3494,"content":3500,"config":3506,"_id":3508,"_type":16,"title":3509,"_source":17,"_file":3510,"_stem":3511,"_extension":20},"/en-us/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources",{"title":3495,"description":3496,"ogTitle":3495,"ogDescription":3496,"noIndex":6,"ogImage":3497,"ogUrl":3498,"ogSiteName":692,"ogType":693,"canonicalUrls":3498,"schema":3499},"How to use Terratag to manage Terraform tags automatically","This blog addresses how you can do that easily and automatically when using Terraform and Terratag (an open source project by env0) on top of the GitLab CI/CD platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682137/Blog/Hero%20Images/blog-image.png","https://about.gitlab.com/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Terratag to automatically manage tags and labels for your Terraform Code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2021-09-14\",\n      }",{"title":3501,"description":3496,"authors":3502,"heroImage":3497,"date":3503,"body":3504,"category":14,"tags":3505},"How to use Terratag to automatically manage tags and labels for your Terraform Code",[1019],"2021-09-14","\n\nWhen using infrastructure as code (IaC) on a public cloud provider, it's important to use tags and labels to organize your IaC using their complementary services. Terratag, an open source project developed by [env0](http://www.env0.com), can be used with Terraform and placed on top of the GitLab CI/CD platform, making tagging and labeling IaC easier and more efficient.\n\nGitLab and Terraform make it easy to tag and label infrastructure as code.\n\n## Inside your toolbox\n\n[GitLab](https://about.gitlab.com/) is the industry's leading DevOps platform. Not long ago, we announced the ability to control Terraform deployments, remote state management, a private module registry, and merge request integration for Terraform. This gives users a range of solutions for running CI/CD for Terraform code and managing it on a large scale.\n\n[Terraform](https://www.terraform.io/) is the most widely adopted IaC framework out there. It's an open source project that is maintained by HashiCorp, and was launched in 2014 based on HashiCorp configuration language (HCL). 
Terraform is a command-line (CLI) tool that can help manage and provision external resources such as public cloud infrastructure, private cloud infrastructure, network appliances, and SaaS and PaaS vendors. All major clouds are supported; AWS, Azure, and GCP have official providers that are maintained internally by the HashiCorp Terraform team.\n\nAll major cloud providers support tagging/labeling for most of their resources using their Terraform provider, to help users manage infrastructure more efficiently. In this blog post, we provide some examples that show how easy it is to tag and label your IaC using Terratag with GitLab CI/CD – a core component of our DevOps platform.\n\n### How to automatically manage tags/labels for your Terraform Code\n\nFirst, we'll take a deep dive into the importance of tagging and labeling your IaC when using a public cloud provider. Next, we'll explain how to manage tags and labels for your IaC easily and automatically when using Terraform and [Terratag](https://terratag.io/) on top of the GitLab CI/CD platform, with simple code examples for an end-to-end solution.\n\n### Why tags/labels are so important\n\nAll major cloud providers allow tagging (or labeling) cloud resources. Moreover, they encourage you to use tags or labels to do things like manage budgets, set up powerful automation algorithms, and unlock insights offered by the cloud providers and independent third parties.\n\nBy harnessing powerful IaC frameworks like Terraform, users can define and tag cloud resources for verticals ranging from development to ops, as well as business needs.\n\n### The problem with tagging today\n\nTagging is a manual process, which can make it a real hassle, particularly as your infrastructure grows. Repeatedly tagging dozens or even hundreds of cloud resources is inefficient, but that's just the start of the problems. Manual tagging fails in other important ways too:\n\n* **Standards are hard to maintain if they're not enforced**: Your entire team needs to be on the same page – keeping an eye out for newly added cloud resources and making sure they include those tags – or you may miss some significant resources when acting on that metadata later.\n\n* **Harder to change**: Applying changes to tag structure across the board quickly becomes unmanageable.\n\n* **Metadata can obscure what's important**: While tagging all this metadata is useful for slicing and dicing later, having it everywhere on your resources pollutes your IaC, making it more cumbersome and harder to maintain.\n\n* **Migration**: What if you already have plenty of Terraform modules with cloud resources, which weren't tagged to begin with? Trying to tag them all now can be painstaking work.\n\nAt the end of the day, IaC is, well, just code, and as is the case with any code, repetition makes it harder to fix errors, apply enhancements, make adjustments, and maintain readability. As tagging is a cross-cutting concern, the lack of proper layering or aspect control makes it harder to retrofit existing solutions.\n\n### Terratag to the rescue\n\n[Terratag](https://terratag.io/) allows the user to automatically tag or label all the resources in their Terraform code. It also automatically tags all of your Terraform sub-modules, even if they don't expose tags as an input. Terratag is a CLI tool that works with all the major cloud providers including AWS, Google Cloud Platform, and Microsoft Azure, and solves the complicated problem of tagging resources across applications at scale. 
It eliminates the risk of human error, can retroactively tag IaC resources that were previously deployed, and helps you easily use the tags for various purposes, like cost management, organization, reporting, etc.\n\n### How to run Terraform with GitLab\n\nGitLab offers a wide range of tools for Terraform, starting with a [managed remote state](https://docs.gitlab.com/ee/user/infrastructure/terraform_state.html), running your deployment with [GitLab CI/CD](https://docs.gitlab.com/ee/ci/), a [Terraform private module registry](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/index.html#publish-a-terraform-module-by-using-cicd), and [integration in Merge Requests (MRs)](https://docs.gitlab.com/ee/user/infrastructure/mr_integration.html) to get Terraform plan output information into an MR.\n\nIn this tutorial, we use GitLab CI/CD to deploy a Terraform repository into Google Cloud Platform and let GitLab manage our remote state.\n\n### Combining Terraform with GitLab in GCP\n\nWe explain how to implement and combine Terraform and GitLab with ease, starting with building the deployment of our Terraform code using GitLab and then seeing the results in Google Cloud Platform.\n\n### Terraform code with GitLab as a backend\n\nWe're using Terraform to deploy a simple VPC and a VM into GCP. We will use GitLab's Terraform backend configuration, which is based on the Terraform [HTTP backend](https://www.terraform.io/docs/language/settings/backends/http.html). The beauty of this configuration is that you don't need to add any configuration regarding authentication when running it inside GitLab CI/CD. GitLab will automatically set up all the relevant configuration for your backend according to the project it's running in.\n\nThe code is available in [the Terratag project created for this blog post](https://gitlab.com/env0/terratag-blog-post/-/tree/main).\n\n### Set up variables\n\nThis Terraform code needs some variables in order to run. We can set these up using GitLab CI/CD variables. Under your GitLab project, go to Settings > CI/CD and expand the Variables section. We will need to add three variables:\n\n* `GOOGLE_CREDENTIALS`: This variable value should be the JSON of your Google Cloud service account. [See this documentation](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) on how to create a service account key.\n\n* `TF_VAR_project_id`: Your Google Cloud project ID.\n\n* `TF_VAR_machine_type`: The VM type you would like to create.\n\n![tg_1](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_1.png)\n\n### Set up GitLab CI/CD\n\nSetting up GitLab CI/CD for Terraform is really easy – all you need to do is add a simple file in your repository called `.gitlab-ci.yml` and add a configuration for each step of your Terraform deployment. We're going to add the following steps to our pipeline:\n\n* **Plan**: This step will run the `terraform init` and `terraform plan` commands, running Terratag in between to tag all the relevant resources. At the end, it will also output the Terraform plan as a JSON file and create an artifact.\n\n* **Apply**: This step will run the `terraform apply` command. It depends on the plan step finishing successfully. 
This step is done manually so we can check the plan before applying the changes.\n\n[https://gitlab.com/env0/terratag-blog-post/-/blob/main/.gitlab-ci.yml](https://gitlab.com/env0/terratag-blog-post/-/blob/main/.gitlab-ci.yml)\n\nSince Terratag scans the entire Terraform code base, including any Terraform modules you may be using, we need to run the `terraform init` command before we run the Terratag command, because the init command downloads all the relevant modules so Terratag can scan them.\n\nWe can see two resources in this code:\n\n* `google_compute_network`: This resource sets up the VPC. Terratag will not apply labels since the [compute network doesn't allow labels](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_network).\n\n* `google_compute_instance`: This resource sets up the VM. Terratag applies the label that the user defines.\n\nHere is the output of Terratag on this Terraform code:\n\n![tg_2](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_2.png)\n\nThis is what this pipeline will look like in the GitLab UI. When the Terraform plan step is successfully completed, you can manually apply the changes after reviewing the plan, which is also available as an artifact – meaning it can be downloaded and viewed locally.\n\n![tg_3](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_3.png)\n\n### How to apply labels on GCP\n\nAs we mentioned before, labeling your resources has a lot of technical, operational, and business benefits. This blog post focuses on the cost benefit of effective labeling.\n\nFirst, let's see that the VM we've created is actually tagged correctly.\n\nStart by heading to the Google Cloud console. Next, go to the Compute Engine page and, under VM, search for the VM we've just created. Then, go into the VM instance details page and see that the label exists with the right value.\n\n![tg_4](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_4.png)\n\nNext, go to the Billing section and select \"Reports\". On the right-hand side of the page there are filters. Under labels, we can filter by the label key and the label value and get the cost of those resources.\n\n![tg_5](https://about.gitlab.com/images/blogimages/2021-terratag-env0/tg_5.png)\n\n### Automate labeling using Terratag\n\nTags and labels play a crucial role in managing large-scale infrastructure projects and offer significant benefits when using tools such as [GitLab CI/CD](https://docs.gitlab.com/ee/ci/). [Terratag](https://www.terratag.io/) has the advantage of easing the transition for Terraform users. 
Adopting Terratag for use with GitLab CI/CD and Terraform will also help establish a standard in your organization when it comes to the use of tags and labels, eliminating the need for human intervention on a large-scale project to change your current Terraform code base.\n\nFeel free to check out the [code base](https://gitlab.com/env0/terratag-blog-post) for this blog post and leave us feedback.\n\n_Blog post coauthor [Omry Hay](https://www.linkedin.com/in/omryhay/) is the co-founder and CTO of [env0](http://www.env0.com)_\n",[894,535],{"slug":3507,"featured":6,"template":678},"gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources","content:en-us:blog:gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources.yml","Gitlab Together With Terratag Open Source To Help You Manage Terraform Resources","en-us/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources.yml","en-us/blog/gitlab-together-with-terratag-open-source-to-help-you-manage-terraform-resources",{"_path":3513,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3514,"content":3519,"config":3523,"_id":3525,"_type":16,"title":3526,"_source":17,"_file":3527,"_stem":3528,"_extension":20},"/en-us/blog/specialized-sidekiq-configuration-lessons-from-gitlab-dot-com",{"title":3515,"description":3516,"ogTitle":3515,"ogDescription":3516,"noIndex":6,"ogImage":3459,"ogUrl":3517,"ogSiteName":692,"ogType":693,"canonicalUrls":3517,"schema":3518},"What we learned about configuring Sidekiq from GitLab.com","Sidekiq is a key part of GitLab, and usually works well out-of-the-box, but sometimes it needs more attention at scale.","https://about.gitlab.com/blog/specialized-sidekiq-configuration-lessons-from-gitlab-dot-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What we learned about configuring Sidekiq from GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Craig Miskell\"}],\n        \"datePublished\": \"2021-09-02\",\n      }",{"title":3515,"description":3516,"authors":3520,"heroImage":3459,"date":3521,"body":3522,"category":14},[1463],"2021-09-02","\nSidekiq in GitLab works perfectly well out-of-the-box in most use cases, but it requires a little more attention in larger deployments or other specialized cases. We've learned a lot about how to configure Sidekiq in large deployments by maintaining GitLab.com – one of the largest instances of GitLab in existence. We added some critical features to GitLab.com in the past year to make it easier to configure Sidekiq in a manner more consistent with the maintainer's guidance, having strayed from this path for some time.\n\nWe are publishing two blog posts devoted to this topic. In this first post, we will unpack how we configured Sidekiq for GitLab.com, and in our second post, we will explain how to apply this to your GitLab instance.\n\n**[Learn more about [how we iterated on Sidekiq background jobs](/blog/scaling-our-use-of-sidekiq/)]**\n\nWe built on the work and learnings described in that [blog post on Sidekiq background jobs](/blog/scaling-our-use-of-sidekiq/) for the project we describe here.\n\n## What is Sidekiq?\n\nSidekiq is usually the background job processor of choice for Ruby-on-Rails, and uses Redis as a data store for the job queues. Background (or asynchronous) job processing is critical to GitLab because there are many\ntasks that:\n\n1. Shouldn't tie up relatively expensive HTTP workers to perform long-running operations\n1. Do not operate within an HTTP-request context (e.g., scheduled/periodic tasks)
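\n\nFor orientation, a Sidekiq job is just a Ruby class whose name and arguments get serialized into Redis. A minimal, illustrative sketch (not an actual GitLab worker) looks like this:\n\n```ruby\nclass WebHookWorker\n  include Sidekiq::Worker\n\n  def perform(hook_id)\n    # long-running work happens here, off the HTTP request path\n  end\nend\n\n# Enqueues {\"class\":\"WebHookWorker\",\"args\":[42],...} into a Redis queue\n# and returns immediately.\nWebHookWorker.perform_async(42)\n```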
\n\nFor most users, how Sidekiq uses Redis doesn't matter much – Sidekiq receives a Redis connection and magic ensues – but at larger scales it becomes important.\n\nThe [Redis data structure that Sidekiq uses for queues is a LIST](https://redis.io/commands#list), which is literally an\nordered sequence of entries. For Sidekiq, each entry is some JSON which describes the work to do (Ruby class + arguments)\nand some metadata. Out-of-the-box, Sidekiq uses a single queue named \"default,\" but it's possible to create and use any\nnumber of other named queues, as long as there is at least one Sidekiq worker configured to look at every queue. Jobs are enqueued at the end of the list using [RPUSH](https://redis.io/commands/rpush), and are retrieved for execution from the front of the list with [BRPOP](https://redis.io/commands/brpop).\n\nA key fact is that BRPOP is a blocking operation – a Sidekiq worker looking to perform work will issue and be blocked by a single BRPOP until any work is available or a timeout (2-second default) is exceeded. Redis then returns the job (if available) and removes it from the LIST.\n\n### About Concurrency (threading)\n\n{: #concurrency-threading}\n\nThis is a little bit tangential, but is important later, so bear with me (or skip this section and come back to it later if you really need it).\n\nWhen starting Sidekiq you can tell it how many threads to run, where each thread requests work from Redis and can\npotentially be executing a job. Sounds like an easy way to allow Sidekiq to do more work, right? Well, not exactly, because threading in Ruby is subject to the Global Interpreter Lock (GIL).\nFor more, [read this great explanation about threading](https://thoughtbot.com/blog/untangling-ruby-threads), from which I will quote one key statement:\n\n> This means that no matter how many threads you spawn, and how many cores you have at your disposal, MRI will literally\nnever be executing Ruby code in multiple threads concurrently\n\nSo each Sidekiq worker process will – at best – only occupy one CPU. Threading is about avoiding constraints on\nthroughput from blocking I/O (mostly network, like Web requests or DB queries).\n\nThe default concurrency is 25, which is fine for a default GitLab installation on a single node with a single Sidekiq\nworker and a wide mix of jobs. But if the jobs are mostly CPU-bound (doing heavy CPU computation in Ruby) then 25 may\nbe far too high and counter-productive as threads compete for the GIL. Or, if your workload is heavily network\ndependent, a higher number might be acceptable since most of the time is spent waiting.\n\n**Why does this matter?** When you start splitting up (sharding) your Sidekiq fleet, you need to pay attention to what\nconcurrency you give to each shard to ensure it is compatible with the subset of Sidekiq jobs that will be executing there.\n\n## How we configured Sidekiq on GitLab.com\n\n### How we historically used Sidekiq\n\nSome time ago in [GitLab history](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/7006), we\ndecided to use one Sidekiq queue per worker (class), with the name of the queue automatically derived from the class\n(name + metadata), e.g., the class WebHookWorker runs in the web_hook queue. This approach has some benefits, but the author of\nSidekiq [does not recommend](https://github.com/mperham/sidekiq/wiki/Advanced-Options#queues) **having more than a \"handful\" of queues per Sidekiq process**.
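\n\nTo picture why the number of queues matters on the fetch side, here is a minimal sketch of that BRPOP cycle using the redis-rb gem (illustrative queue names, not GitLab's actual dispatch code):\n\n```ruby\nrequire 'json'\nrequire 'redis'\n\nredis = Redis.new\nqueues = %w[queue:web_hook queue:post_receive queue:default] # one per listened queue\n\n# A worker blocks on a single BRPOP across every queue it listens to; Sidekiq\n# shuffles the order on each fetch so no queue starves. Redis returns the first\n# available job, or nil after the 2-second timeout.\nqueue, payload = redis.brpop(*queues.shuffle, timeout: 2)\njob = JSON.parse(payload) if payload\n```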
\n\nI assume a \"handful\" means around 10 queues. At the time, we had about 45 job classes/queues, which was beyond a \"handful\"\nbut not excessively so. However, as the GitLab code base has grown, we've added more job classes and queues. Currently, we have 440, and more will inevitably be added as new features are added. As discussed in the [previous blog post](/blog/scaling-our-use-of-sidekiq/), we split our Sidekiq worker fleet into multiple shards with different collections of jobs on each shard, based on the job resource requirements and impact on user experience.\n\nHowever, our \"catchall\" shard is still responsible for roughly 300 of those workers/queues. So each time a catchall Sidekiq worker requests the next available job, it issues a BRPOP with a huge list of queues. Redis then needs to parse that request, set up internal data structures for each queue, find the next available job, and then tear down those data structures. That's a lot of overhead just to fetch one job. We knew this was going to be a problem eventually, but for a while we were able to put our effort into other areas.\n\n### How we configured Sidekiq on GitLab.com today\n\nBy early-to-mid 2021, the Redis instance dedicated to Sidekiq was starting to hit more than 95% CPU saturation at peak:\n\n![Redis CPU Saturation](https://about.gitlab.com/images/blogimages/sidekiq-2021/redis-cpu-saturation.png){: .shadow.medium.center}\nWhat it looks like when Redis CPU usage reaches 95%\n{: .note.text-center}\n\nRedis is fundamentally single-threaded. Sure, IO threads in version 6 change that a bit, but command execution is serialized\nthrough the core thread, so once utilization hits 100% of a CPU core, it doesn't matter how many other idle/spare CPUs you have.\n\nIn my opinion, Redis is an absolutely amazing bit of software – the documentation is excellent, it is more robust than we deserve, and the throughput is spectacular on a single core. It has carried us a long way, but there is this hard limit that we cannot pass.\n\nIf we do exceed these limits, Sidekiq work will not be dispatched fast enough at peak times, and things will go wrong – possibly in quite subtle and troublesome ways. Last year, we gained a lot of headroom by upgrading to the latest CPUs in GCP, but that's not repeatable and merely put off the inevitable. BRPOP with many queues is the core reason for this saturation, with all that overhead on every request from thousands of Sidekiq workers. So what else could we do?\n\nAs we understand it, the CPU usage is generated by a combination of the number of queues *and* the number of workers listening to those queues for work, so we had two possible paths ahead of us:\n\n1. Reduce the number of workers using a given Redis by splitting Sidekiq into multiple fleets\n1. Keep a single logical fleet and reduce the number of queues\n\nBoth were plausible options for reducing Redis CPU usage, but they didn't overlap in implementation, and had quite distinct challenges.\n\nWe needed more data in order to make the best choice. We [performed some experiments](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/956) by spending a few days creating an artificial test harness that produced and consumed Sidekiq jobs at volumes mimicking what we see on GitLab.com. 
I cannot emphasize enough how artificial the workload is, and although we added some complexity to replicate certain aspects of production, it will never be the same as the real workload on GitLab.com. It did [show that reducing the number of queues to \"one queue per shard\" had the greatest effect](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/956#note_542558210). Other discussions also concluded this approach was likely safer, so the decision was easy to make.\n\nIf you're interested, [the code for those experiments is available here](https://gitlab.com/gitlab-com/gl-infra/sidekiq-redis-experiments/), but fair warning, it is *just* enough to do what we needed to do, and requires some manual setup.\n\n### How we adjusted the Sidekiq routing rules\n\nThis change would move us away from the one-queue-per-worker paradigm, but we still needed to maintain the current\nsharding configuration on GitLab.com in particular, so our Sidekiq configuration is a fine balance of queues and workers\nand we cannot throw that all away. So we picked up work from [last year](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/194), adjusted the plan slightly, and implemented Sidekiq routing rules.\n\nPrior to routing rules, the decision on where a job ran was made by the Sidekiq workers. The image below will help you visualize the process:\n\n![One queue per worker](https://about.gitlab.com/images/blogimages/sidekiq-2021/One-Queue-Per-Worker.png){: .shadow.medium.center}\nRepresentation of Sidekiq job routing with one queue per worker\n{: .note.text-center}\n\nIn the image above, each lettered box represents a queue, and jobs are scheduled into a queue based on their name. Where they execute is up to the workers. As you might imagine, it's entirely possible for Rails to put work into a queue that no worker is configured to pick up. With more than 400 workers that's far too easy, so for GitLab.com we ensured that didn't happen by using [queue-selector](https://docs.gitlab.com/ee/administration/sidekiq/extra_sidekiq_processes.html) expressions for most shards and the [negate option](https://docs.gitlab.com/ee/administration/sidekiq/extra_sidekiq_processes.html) to define our catchall (default) shard, with some scripts to make that easier. It was still a complex process, and migrating to Kubernetes added challenges for the final catchall shard as we dealt with the last NFS dependencies and had workloads running in VMs and Kubernetes.\n\nWith routing rules, the decision about which workload should pick up a given job is made when the job is *enqueued*. The image below should make it easier to understand this process.\n\n![One queue per shard Sidekiq job routing with routing rules](https://about.gitlab.com/images/blogimages/sidekiq-2021/One-Queue-Per-Shard.png){: .shadow.medium.center}\nRepresentation of Sidekiq job routing with routing rules\n{: .note.text-center}\n\nRouting rules use the same queue-selector syntax, so the same expressions can still be used to represent shards as before. But because the routing rules are an ordered set of rules applied in the same way for every job no matter where it's scheduled from, we no longer need to use a complex generated \"negate\" expression to define the catchall/default shard.\n\nInstead, all that is required is a final \"default\" rule (`*`) that matches all remaining jobs and routes them to the catchall shard (we use the `default` queue for that out of convenience). We only need to ensure there is a set of Sidekiq workers listening for each of the resulting small number of simply named queues, which we can base on the shard name for obviousness and simplicity. This is much easier to get right and visually verify.
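\n\nAs an illustration (the shard names and selector expressions here are hypothetical, not GitLab.com's production rules), an ordered rule set for a sharded fleet might look like:\n\n```ruby\nsidekiq['routing_rules'] = [\n  # Rules are evaluated top to bottom at enqueue time; the first match wins.\n  ['resource_boundary=memory', 'memory_bound'],\n  ['urgency=high', 'urgent'],\n  ['*', 'default'] # everything else: the catchall shard\n]\n```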
\n\nLearn more about [the routing rules syntax and how to configure them](https://docs.gitlab.com/ee/administration/sidekiq/processing_specific_job_classes.html).\n\n### How it's going\n\nOver the past few months, we've been [working on migrating GitLab.com to this new arrangement for the catchall shard](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/447). When it came to actually switching to routing rules, we took a measured approach and did it in phases.\n\nWe started by [creating a set of routing rules that recreated our existing shards](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/878/diffs), but with the 'nil' target, which tells Rails to keep using the\nper-worker queue. This gave us a base from which we could maintain existing behavior but then start routing to a limited set of named queues in simple iterative steps.\n\nFrom there, we could add new rules immediately before the final catchall rule to the `default` queue, which GitLab doesn't actively use out-of-the-box, but which the catchall shard listens to. First, we [added some rules for jobs that don't normally run on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/918/diffs), but which we could use to test (e.g., [Chaos::SleepWorker](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/chaos/sleep_worker.rb)).\n\nNext, we moved a feature category with a couple of jobs, first [routing](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/978/diffs) them to `default`, then [stopping listening to them in the catchall shard](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/979/diffs).\n\nWe repeated this pattern of rerouting then not listening in [batches](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1074#migration-phases), using [Feature Categories](https://docs.gitlab.com/ee/development/feature_categorization/) as a relatively simple way to select groups of related workers. After selecting the groups, we built up from batches of small, low-use categories (lots of classes, but not many jobs per second) to single large feature categories at the end: `continuous_integration` (18% of catchall shard jobs), and `integrations` and `source_code_management`, which are each generating about 30% of catchall shard jobs. We gained confidence in the queue-handling as this progressed, and gave ourselves plenty of opportunity to gather data and pause if necessary.\n\nAt each stage we stopped listening to several tens of queues from our catchall Kubernetes deployment, and gradually saw the CPU\nsaturation on Redis drop. After finally shutting down Sidekiq on our (now legacy) virtual machines, we've reached a\nfinal state where at peak times CPU on Redis reaches only around 75%, down from peaks of 95% or higher:\n\n![Reduced CPU usage in Redis](https://about.gitlab.com/images/blogimages/sidekiq-2021/cpu-then-vs-now.png){: .shadow.medium.center}\nReduced CPU usage in Redis\n{: .note.text-center}\n\n### What will we do next?\n\nFirst, we need to finish the [one-queue-per-shard migration](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/469)\nfor all the other shards, aside from catchall. 
These shards won't see the same level of impact because they run far fewer queues than catchall, but migrating them will give us a consistent job routing strategy. In the long term, Redis will eventually become the bottleneck again, and we're going to have to either split Sidekiq into multiple fleets or change to something architecturally different. Running multiple fleets has some challenges, but it means we can keep using the existing technologies we have invested time and tooling in (including Omnibus for self-managed deployments). But given that the bottleneck is still eventually going to be Redis CPU, this might well be the time to look at other job processing paradigms.\n\n_In our next blog post, we explain how you can take what we learned about configuring Sidekiq for GitLab.com and apply it to your own large instance of GitLab._\n\nCover image by [Jerry Zhang](https://unsplash.com/@z734923105) on [Unsplash](https://www.unsplash.com)\n",{"slug":3524,"featured":6,"template":678},"specialized-sidekiq-configuration-lessons-from-gitlab-dot-com","content:en-us:blog:specialized-sidekiq-configuration-lessons-from-gitlab-dot-com.yml","Specialized Sidekiq Configuration Lessons From Gitlab Dot Com","en-us/blog/specialized-sidekiq-configuration-lessons-from-gitlab-dot-com.yml","en-us/blog/specialized-sidekiq-configuration-lessons-from-gitlab-dot-com",{"_path":3530,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3531,"content":3536,"config":3542,"_id":3544,"_type":16,"title":3545,"_source":17,"_file":3546,"_stem":3547,"_extension":20},"/en-us/blog/its-time-to-upgrade-docker-engine",{"title":3532,"description":3533,"ogTitle":3532,"ogDescription":3533,"noIndex":6,"ogImage":1579,"ogUrl":3534,"ogSiteName":692,"ogType":693,"canonicalUrls":3534,"schema":3535},"It's time to update Docker Engine","Now that Alpine Linux 3.14 is being used by more images, it's time to upgrade Docker Engine to 20.10.6 or newer.","https://about.gitlab.com/blog/its-time-to-upgrade-docker-engine","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to update Docker Engine\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomasz Maczukin\"}],\n        \"datePublished\": \"2021-08-26\",\n      }",{"title":3532,"description":3533,"authors":3537,"heroImage":1579,"date":3539,"body":3540,"category":14,"tags":3541},[3538],"Tomasz Maczukin","2021-08-26","\n\nThe [Alpine Linux](https://alpinelinux.org/) distribution is the base OS for many Linux container images. It provides a handy packaging mechanism, new versions of software, and a quick and predictable release cycle – all while being distributed using a minimal image size. It's used by many very popular container images, for example `docker:dind`, which is [widely used in GitLab CI/CD workloads](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html) to build and manage container images in jobs.\n\nOn June 15, 2021, Alpine Linux released version 3.14.
[As documented in the release notes](https://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.14.0#faccessat2), changes in the `musl` library require an updated version of [runc](https://github.com/opencontainers/runc) or an updated version of [Docker](https://github.com/moby/moby) for Alpine 3.14-based images to work properly.\n\nSoftware products across the computer industry have started migrating their Alpine Linux-based container images to 3.14 since it includes significant updates for various network and security-oriented use cases. In cases where the GitLab Runner environment uses a Docker version older than 20.10.6 to handle new container images based on Alpine 3.14, CI/CD jobs may encounter unexpected problems during execution and fail.\n\n[We encountered this problem](https://gitlab.com/gitlab-org/gitlab/-/issues/335641) at GitLab a few weeks ago, when the `ruby:2.7` image was migrated to use Alpine Linux 3.14 as the base. We used a quick workaround to unlock our pipelines by explicitly pinning the Alpine 3.13 tag of the image (fortunately, it was provided!). To fully resolve the problem for all GitLab.com users who use our instance runners, we pushed forward an update to the base image of our autoscaled VMs, which included an update of Docker Engine.\n\n\nAmong the popular and widely used container images migrating to Alpine 3.14 [are the `docker` and `docker:dind` images](https://github.com/docker-library/docker/pull/317). Importantly, this change will rebuild and re-push the existing version-specific images, like `docker:20.10-dind`. This means users who pinned their version of the Docker-in-Docker service in their `.gitlab-ci.yml` files will still get the image version updated to Alpine 3.14, and using a Docker Engine older than 20.10.6 will probably create problems for them.\n\n## What's the solution?\n\nThe real solution is to upgrade the execution environment according to Alpine's release notes, which state:\n\n> Therefore, Alpine Linux 3.14 requires **at least one** of the following:\n>\n> 1. runc v1.0.0-rc93\n>    - If using Docker's Debian repositories, this is part of containerd.io 1.4.3-2\n>    - If using Docker Desktop for Windows or Mac, this is part of Docker Desktop 3.3.0\n> 1. Docker 20.10.0 (which contains [moby commit a181391](https://github.com/moby/moby/commit/a18139111d8a203bd211b0861c281ebe77daccd9)) or greater, **AND** libseccomp 2.4.4 (which contains backported [libseccomp commit 5696c89](https://github.com/seccomp/libseccomp/commit/5696c896409c1feb37eb502df33cf36efb2e8e01)) or greater. In this case, to check if your host libseccomp is faccessat2-compatible, invoke `scmp_sys_resolver faccessat2`. If `439` is returned, faccessat2 is supported. If `-1` is returned, faccessat2 is not supported. Note that if runc is older than v1.0.0-rc93, Docker must still be at least version 20.10.0, regardless of the result of this command.\n> 1. As a workaround, in order to run under old Docker or libseccomp versions, [the moby default seccomp profile](https://github.com/moby/moby/blob/master/profiles/seccomp/default.json) should be downloaded and on line 2, `defaultAction` changed to `SCMP_ACT_TRACE`, then `--seccomp-profile=default.json` can be passed to dockerd, or `--security-opt=seccomp=default.json` passed to `docker create` or `docker run`.
This will cause the system calls to return ENOSYS instead of EPERM, allowing the container to fall back to faccessat.\n>\n> Note: When using nested Docker, **every layer** must meet one of the above requirements, since if **any layer** improperly denies the use of faccessat2, Alpine Linux 3.14 will not function correctly.\n\nThere are several ways to solve this problem, but since they depend on a specific configuration, users need to choose the solution that best matches their environment.\n\nAlthough the [release notes](https://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.14.0#faccessat2) mention Docker 20.10.0 (which brings some needed changes), they also mention that an updated version of libseccomp must be used in this case. For environments that use Docker Engine on Linux, both criteria should be met by Docker Engine 20.10.6 and higher.\n\nFor nested Docker environments (which in the case of GitLab CI/CD mostly means Docker-in-Docker-based jobs) to work properly with images based on Alpine Linux 3.14, both the Docker Engine on the Runner's host **AND** the `docker:dind` image must be updated to at least 20.10.6.\n\nTo summarize:\n\n1. Users **using images** based on Alpine Linux 3.14 for their job execution (read: as the value of `image:` or `services:` keywords in `.gitlab-ci.yml`) must update Docker Engine on their hosts to version 20.10.6 or higher.\n\n1. Users **building images** based on Alpine Linux 3.14 using the Docker-in-Docker approach (read: using `services: [docker:X.Y-dind]` and `script: [..., docker build -t my/image ., ...]` in `.gitlab-ci.yml`) must also update the `docker:dind` image version to `docker:20.10.6-dind` or higher.\n\n**For users of GitLab.com instance-level Runners, the upgrade of Docker Engine was completed a few weeks ago. Still, users likely need to update the Docker-in-Docker service they use to `docker:20.10.6-dind` or higher.**\n\n## Some temporary workarounds\n\nSince the update of Docker Engine may not be easy in some environments, the only known workaround is to pin the images used to versions based on Alpine Linux 3.13. As you can see in the [Docker library pull request](https://github.com/docker-library/docker/pull/317#issuecomment-880140631), many projects have already found this to be a problem for their users and provided image versions tagged with an `-alpine3.13` suffix.\n\nFor the Docker-in-Docker images described in this post, [this was done quite recently](https://github.com/docker-library/docker/pull/327). Users who can't update the Docker Engine on the Runner host or for Docker-in-Docker can temporarily solve the problem by using, for example, `services: [docker:19.03.15-dind-alpine3.13]`.\n\nRemember that this is only a temporary solution. For example, the official `docker` image [has already abandoned the 19.03 line](https://github.com/docker-library/docker/pull/329), and new images for `19.03.x` will not be released.\n\nThe only real, long-term solution is to plan and maintain the upgrade.
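To make the summary above concrete, here is a minimal, illustrative `.gitlab-ci.yml` sketch (the job name and build command are hypothetical examples, not a prescription for your pipeline):\n\n```\n# After upgrading Docker Engine on the runner host to 20.10.6 or newer,\n# also pin the Docker-in-Docker service to an updated version:\nbuild-image:\n  image: docker:20.10.6\n  services:\n    - docker:20.10.6-dind\n  script:\n    - docker build -t my/image .\n\n# Temporary stopgap if the host's Docker Engine cannot be upgraded yet:\n# swap the service above for an Alpine 3.13-based tag, for example:\n#   services:\n#     - docker:19.03.15-dind-alpine3.13\n```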
\n\n",[110,728],{"slug":3543,"featured":6,"template":678},"its-time-to-upgrade-docker-engine","content:en-us:blog:its-time-to-upgrade-docker-engine.yml","Its Time To Upgrade Docker Engine","en-us/blog/its-time-to-upgrade-docker-engine.yml","en-us/blog/its-time-to-upgrade-docker-engine",{"_path":3549,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3550,"content":3556,"config":3561,"_id":3563,"_type":16,"title":3564,"_source":17,"_file":3565,"_stem":3566,"_extension":20},"/en-us/blog/stageless-pipelines",{"title":3551,"description":3552,"ogTitle":3551,"ogDescription":3552,"noIndex":6,"ogImage":3553,"ogUrl":3554,"ogSiteName":692,"ogType":693,"canonicalUrls":3554,"schema":3555},"Write a stageless CI/CD pipeline using GitLab 14.2","With GitLab 14.2, you can write a complete CI/CD pipeline without defining any stages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679507/Blog/Hero%20Images/ci-cd.png","https://about.gitlab.com/blog/stageless-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Write a stageless CI/CD pipeline using GitLab 14.2\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-08-24\",\n      }",{"title":3551,"description":3552,"authors":3557,"heroImage":3553,"date":3558,"body":3559,"category":14,"tags":3560},[1020],"2021-08-24","\n\nGitLab CI/CD technology has historically divided a pipeline into stages based on the typical development workflow. Now that [GitLab 14.2 has launched](/releases/2021/08/22/gitlab-14-2-released/), users can speed up cycle times by using the [`needs`](https://docs.gitlab.com/ee/ci/yaml/#needs) command to write a complete CI/CD pipeline with every job in the single stage. In fact, you can omit stages completely and have a [\"stageless\" pipeline](https://about.gitlab.com/releases/2021/08/22/gitlab-14-2-released/#stageless-pipelines) that executes entirely based on the `needs` dependencies.\n\n## Understanding stages\n\nIn GitLab CI/CD, you use [stages](https://docs.gitlab.com/ee/ci/yaml/#stages) to group jobs based on the development workflow and control the order of execution for CI/CD jobs.\n\nPipelines execute each stage in order, where all jobs in a single stage run in parallel. After a stage completes, the pipeline moves on to execute the next stage and runs those jobs, and the process continues like this until the pipeline completes or a job fails. If a job fails, the jobs in later stages don't start at all.\n\n## History of stages in GitLab CI/CD\n\nWhen we first designed GitLab CI/CD, we knew that in a continuous integration workflow you build and test software every time a developer pushes code to the repository. The use of stages in GitLab CI/CD helped establish a mental model of how a pipeline will execute. By default, stages are ordered as: `build`, `test`, and `deploy` - so all stages execute in a logical order that matches a development workflow. The first step is to build the code, and if that works, the next step is to test it. If the tests pass, then you deploy the application.\n\nOf course, you can actually create as many stages as you like and order them as desired. We also introduced the `.pre` and `.post` stages which are predefined stages that let you set certain jobs to always run at the beginning (`.pre`) or end (`.post`) of your pipeline. 
GitLab CI/CD has used stages this way for the past few years.\n\n## Starting to break out of stage order\n\nLast year, we introduced the [`needs`](https://docs.gitlab.com/ee/ci/yaml/#needs) keyword, which allows a user to create a Directed Acyclic Graph (DAG) to speed up the pipeline. A job that uses the `needs` keyword creates a dependency between itself and one or more jobs in earlier stages. The job is allowed to start as soon as the earlier jobs finish, skipping the stage order to speed up the pipeline.\n\nIn a sense, you can think of a pipeline that only uses stages as the same as a pipeline that uses `needs` – except every job \"needs\" every job in the previous stage. On the other hand, if jobs in a pipeline *do* use `needs`, they only \"need\" the exact jobs that will allow them to complete successfully. They shouldn't need all the jobs in the previous stage. For example, there's no need for a Ruby test job to wait for a JavaScript linter to complete.\n\n## Stageless pipelines become reality\n\nThe `needs` keyword quickly became popular among our users and helped optimize and accelerate CI/CD pipelines. However, it had one limitation: a `needs` dependency could only exist between jobs in different stages. This limitation was a pain point for our users because they wanted to configure the pipeline based on the `needs` dependencies only and drop the use of stages completely. The importance of adding this functionality became clear because this was one of the most popular [feature requests](https://gitlab.com/gitlab-org/gitlab/-/issues/30632) for GitLab CI/CD.\n\nNow in GitLab 14.2, [you can finally define a whole pipeline using nothing but `needs` to control the execution order](/releases/2021/08/22/gitlab-14-2-released/#stageless-pipelines). No more need to define any stages if you use `needs`!\n\n## Are we getting rid of stages?\n\nNo, we do not have any plans to remove stages from GitLab CI/CD, and the stage-based workflow still works great for those who prefer it.\n\nIn fact, if you build a \"stageless\" pipeline, there will still be at least one stage that holds all the jobs. Removing stages was never the goal.
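As a minimal sketch of the idea (again with hypothetical job names), here is a pipeline that defines no stages at all and lets `needs` control the execution order:\n\n```\n# No `stages:` block – execution order comes entirely from `needs`.\nbuild-ruby:\n  script: echo \"Build the Ruby app\"\n\nlint-js:\n  script: echo \"Lint the JavaScript\"  # starts immediately, in parallel\n\ntest-ruby:\n  needs: [build-ruby]                 # starts as soon as build-ruby finishes,\n  script: echo \"Run the Ruby tests\"   # without waiting for lint-js\n```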
Our goal is still to support you in building better and faster pipelines, while providing you with the high degree of flexibility you want.\n\nAs always, share any thoughts, comments, or questions by [opening an issue in GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/new?issue%5Bmilestone_id%5D=) and mentioning me (@dhershkovitch).\n",[832,937],{"slug":3562,"featured":6,"template":678},"stageless-pipelines","content:en-us:blog:stageless-pipelines.yml","Stageless Pipelines","en-us/blog/stageless-pipelines.yml","en-us/blog/stageless-pipelines",{"_path":3568,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3569,"content":3574,"config":3580,"_id":3582,"_type":16,"title":3583,"_source":17,"_file":3584,"_stem":3585,"_extension":20},"/en-us/blog/how-grammatech-and-gitlab-enables-better-devsecops",{"title":3570,"description":3571,"ogTitle":3570,"ogDescription":3571,"noIndex":6,"ogImage":2478,"ogUrl":3572,"ogSiteName":692,"ogType":693,"canonicalUrls":3572,"schema":3573},"How a new integration helps GitLab customers secure their code","GitLab Ultimate customers can use CodeSonar from GrammaTech for SAST and to bake protection into every stage of software development.","https://about.gitlab.com/blog/how-grammatech-and-gitlab-enables-better-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How a new integration helps GitLab customers secure their code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Simko\"}],\n        \"datePublished\": \"2021-08-20\",\n      }",{"title":3570,"description":3571,"authors":3575,"heroImage":2478,"date":3577,"body":3578,"category":14,"tags":3579},[3576],"Christian Simko","2021-08-20","\n\nSoftware development teams that embrace agile and [DevSecOps](/topics/devsecops/) are able to code with a security-first mindset, which is essential for industries that build particularly complicated products where security is paramount, like aerospace and defense, automotive, industrial controls, medical devices, and more.\n\nStatic application security testing (SAST) solutions, like [CodeSonar® from GrammaTech](https://www.grammatech.com/products/source-code-analysis), integrate directly into CI/CD pipelines to bake security into every step of the software development life cycle (SDLC) – protecting your products every step of the way. Security solutions like GrammaTech's pair well with an all-in-one DevOps Platform like GitLab, and allow development teams to follow best practices and industry standards to develop code that is higher quality and more secure.\n\n## The GrammaTech and GitLab integration\n\nThe GrammaTech module for [GitLab Ultimate](/pricing/ultimate/) provides native SAST capabilities that scan code for defects in CI/CD pipelines, eliminating the need for any integration and maintenance by users. It allows developers to assess code continuously, avoiding costly mistakes and the duplicative work associated with waiting until the testing phase to scan for security problems.\n\nWe recognize that developers face pressure to meet aggressive deadlines for delivering new software, as rolling releases and agile development practices have developers pushing new features and code into production faster. Integrating SAST tools like CodeSonar into a DevOps Platform like GitLab Ultimate is a natural consequence of more iterative development in companies that embrace DevSecOps practices.
CodeSonar helps developers shift security left by detecting and eliminating bugs and vulnerabilities at the earliest stages of the SDLC.\n\n## SAST with CodeSonar\n\nCodeSonar uses a unified data flow and symbolic execution analysis to examine the computation of the complete application. This approach is deeper than typical pattern-matching syntax analysis, and discovers 3-5x more defects on average.\n\nStatic analysis is unlike other software development tools (e.g., testing tools, compilers, and configuration management) because it can be integrated into the development process at any time with ease. CodeSonar simply attaches to your existing build environments to add analysis information to your verification process.\n\n### How does CodeSonar work?\n\nLike a compiler, CodeSonar does a \"build\" of your code using the existing build environment, but instead of creating object code, CodeSonar creates an abstract model of your entire program. From the derived model, CodeSonar's symbolic execution engine explores program paths, reasoning about program variables and how they relate. Advanced theorem-proving technology prunes infeasible program paths from the exploration.\n\n![How CodeSonar works to secure code](https://about.gitlab.com/images/blogimages/codesonar.png){: .shadow.center}\nSee how CodeSonar secures code.\n{: .note.text-center}\n\nCheckers in CodeSonar perform static code analysis to find common defects, violations of policies, etc. Checkers operate by traversing or querying the model and looking for particular properties or patterns that indicate defects. Sophisticated symbolic execution techniques explore paths through a control-flow graph – the data structure representing paths that might be traversed by a program during its execution. When the path exploration notices an anomaly, a warning is generated.\n\nAn astronomical number of combinations of circumstances must be modeled and explored, so CodeSonar employs a variety of strategies to ensure scalability. For example, procedure summaries are refined and compacted during the analysis, and paths are explored in a way that minimizes paging.\n\n## Continuous Integration enabled by GitLab\n\nCodeSonar integrates into GitLab's pipeline with each [merge request (MR)](https://docs.gitlab.com/ee/user/project/merge_requests/), automatically analyzing your code and returning any vulnerabilities found via the GitLab SAST interface. Users can consult the GitLab Security Dashboard to get an overview of code security, and the Vulnerability Report gets into the details.\n\n![How CodeSonar integrates with GitLab CI pipelines](https://about.gitlab.com/images/blogimages/codesonar2.png){: .shadow.center}\nHow CodeSonar integrates with GitLab CI pipelines.\n{: .note.text-center}\n\n### Review CodeSonar warnings in GitLab Vulnerability Reports\n\nCodeSonar displays vulnerabilities right in the GitLab UI – you can review a warning, create a GitLab issue, and assign it to a developer – all in a single application. You can also dismiss vulnerabilities. CodeSonar's fingerprinting technology ensures that GitLab won't ever show dismissed vulnerabilities to you again.\n\n### Get a more detailed warning view\n\nSometimes you need more information to decide how to handle a particular warning. CodeSonar and GitLab make this easy.
The CodeSonar warning message can be viewed directly in GitLab, and CodeSonar's detailed warning reports with annotated source code are just a click away – no copying and pasting, or searching for line numbers.\n\n![Example of GitLab vulnerability report](https://about.gitlab.com/images/blogimages/codesonar3.png){: .shadow.center}\nSee example of a GitLab vulnerability report and detailed view of warnings.\n{: .note.text-center}\n\n## How to get started\n\nA typical way to use the GitLab CI/CD pipeline is to set it up to run whenever new Git commits are submitted to an MR. When you add CodeSonar static analysis to your MR pipeline, GitLab will display the new analysis warnings on the MR page. The full set of warnings is always available on the pipeline page.\n\n### Prerequisites to use CodeSonar\n\n1. The CodeSonar integration requires a working instance of *GitLab Ultimate edition*.\n2. You must have a source code project in your GitLab instance that you wish to analyze. Set up a [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) pipeline for your project that can build your source code. This will include the configuration of one or more GitLab pipeline jobs ([more on how to configure GitLab Runners](https://docs.gitlab.com/runner/configuration/)).\n3. If you use Docker, ensure you have [Docker Engine](https://docs.docker.com/engine/install/) version 19.03.12 or later.\n4. Use the CodeSonar software package that is appropriate for your GitLab pipeline job runner's operating platform.\n5. Set up a dedicated, \"persistent\" CodeSonar Hub to coordinate and receive the results of your analysis. See your CodeSonar manual for how to set up and license a Hub.\n6. You will need a valid CodeSonar Hub license that is appropriate to your configuration and the CodeSonar GitLab Integration software package.\n\nRead the [instructions on installing the CodeSonar GitLab integration](https://support.grammatech.com/documentation/codesonar/integrations/gitlab/).\n\n_Christian Simko is the Director of Product Marketing at GrammaTech._\n",[1307,232,915],{"slug":3581,"featured":6,"template":678},"how-grammatech-and-gitlab-enables-better-devsecops","content:en-us:blog:how-grammatech-and-gitlab-enables-better-devsecops.yml","How Grammatech And Gitlab Enables Better Devsecops","en-us/blog/how-grammatech-and-gitlab-enables-better-devsecops.yml","en-us/blog/how-grammatech-and-gitlab-enables-better-devsecops",{"_path":3587,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3588,"content":3594,"config":3601,"_id":3603,"_type":16,"title":3604,"_source":17,"_file":3605,"_stem":3606,"_extension":20},"/en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",{"title":3589,"description":3590,"ogTitle":3589,"ogDescription":3590,"noIndex":6,"ogImage":3591,"ogUrl":3592,"ogSiteName":692,"ogType":693,"canonicalUrls":3592,"schema":3593},"Setting up 100 AWS Graviton Spot Runners for GitLab","Utilizing the GitLab HA Scaling Runner Vending Machine for AWS Automation to setup 100 GitLab runners on AWS Spot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669882/Blog/Hero%20Images/hundredgitlabspotrunner.png","https://about.gitlab.com/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision 100 AWS Graviton GitLab Spot Runners in 10 Minutes for $2/hour\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin 
Sanoy\"},{\"@type\":\"Person\",\"name\":\"Nupur Sharma\"}],\n        \"datePublished\": \"2021-08-17\",\n      }",{"title":3595,"description":3590,"authors":3596,"heroImage":3591,"date":3598,"body":3599,"category":14,"tags":3600},"How to provision 100 AWS Graviton GitLab Spot Runners in 10 Minutes for $2/hour",[1701,3597],"Nupur Sharma","2021-08-17","\n\nManaging elastically scaled or highly available compute infrastructures is one of the key challenges the cloud was built for. Application scaling concerns can be handled by cloud services that are purpose designed, rigorously tested, and continually improved. This article dives into some specific enablement automation that brings the benefits of AWS Autoscaling Groups (ASG) to runner management. There are benefits to both the largest fleets and single instance runners.\n\nEmbedded in this article is a YouTube video that demonstrates the deployment of 100 GitLab runners on Amazon EC2 Spot compute in less than 10 minutes using less than 10 clicks. The video also shows updating this entire fleet in under 10 minutes to emphasize the time savings of built-in maintenace.\n\nThe information and automation in this article applies to GitLab Private Runners which are deployed on your own compute resources. Self-managed GitLab instances require private runners, but they can also be configured and used with GitLab.com SaaS accounts.\n\n## Well-architected runner management\n\nThere are many different reasons that a customer might need to deploy multiple runners with various characteristics. Some of the more popular ones are:\n\n- Workloads that require large-scale runner fleets.\n- To gain cost savings through Spot compute, uptime scheduling, and ARM architecture.\n- Projects with high demand of CI activity to make sure that the runner is not being held up by jobs on another project.\n- Jobs that have special security requirements, e.g., security credentials, role-based access or managed identities for Continuous Delivery (CD). These security requirements can enable instance-level (AWS IAM Instance Profile) security by allowing runners with sufficient rights to deploy in specific target environments. For example, a CD runner for non-production environments and a different runner for production.\n- Implementing role-based access control rather than user-based. This means users don't have to use secrets to manage security requirements for CI jobs to accomplish their tasks.\n- Development teams can be confident the runner has the same capabilities for CI and CD automation they test through their interactive logins by leveraging a common IAM role.\n\n### The challenges of building production-grade elastic GitLab Runners\n\n[The GitLab Runner](https://docs.gitlab.com/runner/) is the workhorse of GitLab CI and CD capabilities. The runner can handle numerous operating environments and automation functions for a GitLab instance. The GitLab Runner has become very sophisticated due to the broad range of supported environments. In order to successfully configure the GitLab Runner as a set-it-and-forget-it service, the user has to work through many different decisions and considerations. We summarize some of the GitLab Runner-specific considerations that can be challenging:\n\n- There are a lot of configuration options and scenarios to sort through. 
It can be an iterative process to discover what needs to be done to set up GitLab Runners.\n- Ensuring runners are a production-grade capability requires Infrastructure as Code (IaC) development so that high availability and scaling can be achieved by automatically spawning new instances.\n- Ensuring that runner deregistration happens correctly when GitLab Runners are automatically scaled in.\n- Additional cost-saving configurations, such as Spot compute and scheduled runner uptime, can complicate the automation requirements for AWS Autoscaling Groups (ASGs).\n- Large organizations often want developers to be able to easily self-service deploy runners with various configurations. Service Management Automation (SMA) has been made popular with products like ServiceNow, AWS Service Catalog, and AWS Control Tower. This automation is compatible with SMA.\n- It can be difficult to map runners to AWS and map AWS to runners in large organizations with numerous runners and AWS accounts.\n\n### Introducing the GitLab HA Scaling Runner Vending Machine for AWS\n\nAn effective way to handle multiple design considerations is to make a reusable tool. To help you with best practice runner deployments on AWS, we created the [GitLab HA Scaling Runner Vending Machine for AWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/) (\"The GitLab Runner Vending Machine\"). It is written in AWS's infrastructure-as-code language, CloudFormation.\n\n> **Designed with AWS Well Architected:** This automation has many features beyond the scope of this blog post. The primary focus of this blog post is on managing costs. See the [full list of features here](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md).\n\nThe GitLab Runner Vending Machine has the following cost management and scaling management benefits, exposed as a variety of parameters:\n\n- The ability to leverage Spot compute instances. This is important because it leaves CI/CD pipeline developers in charge of whether specific GitLab CI/CD jobs run on Spot compute or not.\n- ASG-scheduled scaling so that a runner or runner fleet can be completely shut down when not in use.\n- The GitLab Runner Vending Machine can leverage ARM compute for Linux - which runs faster and costs less.\n- It can also use ASG to update all runners in a fleet with the latest machine images and GitLab Runner version (or a specific version). When maintenance is not built-in, the labor cost of keeping things up-to-date can be significant.\n- Runner naming and tagging in AWS and GitLab, which eases the burden of locating runner instances and managing orphaned runner registrations, whether that management is manual or automated.\n\n### How to save money with The GitLab Runner Vending Machine\n\nSignificant savings are possible with this IaC, whether your team wants to save on a single runner or a fleet of them.\n\nThe savings calculations below are for a single runner and should be linear for a given workload. To calculate your savings for more runners, simply multiply the final result by the number of runner instances. The available \"Runner Minutes\" per hour is calculated as the runner's job concurrency setting multiplied by the minutes in an hour. For this exercise, we'll use a job concurrency of \"10\".
This number should be changed depending on the instance types you are using and the load testing of your typical CI/CD workloads.\n\nJust like most performance analysis, we are assuming that hardware resource utilization is optimal and consistent. If a runner cluster can sustain respectable performance with 80% CPU loading, this calculation assumes that would be maintained regardless of the size of the cluster.\n\n#### AWS Graviton ARM and Spot savings\n\nThe GitLab Runner engineering team has completed performance testing that demonstrates performance gains of more than 30% on some AWS Graviton (ARM-based) instance types. Assuming that runners are performance-managed for optimized utilization, this gain is a direct cost savings. Just recently, we shared [how deploying GitLab on Arm-based AWS Graviton2 resulted in cost savings of 23% and 36% performance gains](/blog/achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor/).\n\n![ARM Efficiency Test Results For GitLab Runner](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image1.png)\nGitLab Runner testing results for ARM-efficiency gains.\n{: .note.text-center}\n\n#### Scheduling savings\n\nThe savings can be dramatic when teams are able to turn off runners when not in use. For instance, scheduling a runner to operate for 40 hours per week saves 76% compared to the cost of running it for all 168 hours. A runner in use for just 10 hours per week saves 94%.\n\n#### Combining scheduling, Spot, and ARM to save 97%\n\nJust for fun, let's see what savings are possible by comparing a standard runner scenario – runners deployed on customized, stand-alone instances – with the maximum savings the automation can deliver.\n\nImagine I am a developer who set up a custom GitLab Runner on an m5.xlarge instance, which is x86 architecture, for a development team that works 40 hours per week in the same time zone. Since there is no automation, the GitLab Runner runs 24/7. We will assume a job concurrency of 10, which gives 600 \"runner minutes\" per hour of run time. Scheduling uptime, running on Spot, and leveraging ARM can all be achieved quickly by redeploying the runner with The GitLab Runner Vending Machine.\n\nHere is the calculation to run the configuration described above for one week: On-Demand, x86, always on: 1 x m5.xlarge = $0.192/hr x 168 hrs/week = **$32/week, or $1664/year**\n\nHere are the savings that come from running Spot, ARM, and scheduling the Runner to be up just 40 hrs/week: 1 x m6g.large Spot = $0.0419/hr x 40 hrs/week x 64% (to account for the 36% better performance) = **$1/week**\n\n$1/$32 x 100 = 3.125% of the original cost for the same work. In other words, **we just saved 97%** without ever impacting the ability to get the job done.\n\nIn short, The GitLab Runner Vending Machine intends to bring the many cost-saving mechanisms of AWS Cloud computing to your GitLab Runner fleets.\n\nYou can save costs by using ARM/Graviton instances, Spot compute, or by scheduling uptime. In many cases, you can combine all three savings mechanisms for maximum impact.\n\n### Special pipeline building concerns for Spot Runners\n\nSpot instances can disappear with as little as two minutes of warning. This inevitably means some runners will be terminated while jobs are still in progress. CI/CD pipeline developers must take into account whether a job ought to run on compute resources that can disappear with short notice (so short as to be considered \"no notice\").
This comes down to deciding what jobs are OK to run on Spot and what jobs should instead run on AWS' persistent compute known as \"On-Demand\".\n\nThe GitLab Runner Vending Machine accounts for these constraints by tagging runner instances in GitLab with `computetype-spot` or `computetype-ondemand`, so the `tags:` section of a GitLab CI/CD job can indicate whether it should run on Spot compute.\n\nSome types of CI workloads, e.g., mass performance testing or large unit testing suites, may already have work queues and work tracking that make them ideal for Spot compute. Other activities, e.g., polling another system for a deployment status, could suffer a material discrepancy if terminated prematurely. Others, such as building the application, are sort of in the middle. Usually, restarting the build is sufficient.\n\n### Job configuration for Spot\n\nIf you need to reschedule terminated work, it is helpful to configure GitLab's job `retry:` keyword. When working with a dispatching engine or work queue that automatically accounts for incomplete work by processing agents, the retry configuration is unnecessary.\n\nHere is an example that implements both of these concepts:\n\n```\nmy-scaled-test-suite:\n  parallel: 100\n  tags:\n    - computetype-spot\n  retry:\n    max: 2\n    when:\n      - runner_system_failure\n      - unknown_failure\n```\n\nThe usage and limitations of `retry:` are discussed in greater detail in the [GitLab CI documentation on retry](https://docs.gitlab.com/ee/ci/yaml/#retry).\n\n### How to get started\n\nThe CloudFormation templates for the [GitLab Runner Vending Machine are managed in a public project on GitLab.com](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/). There is a lot of information in the project about how the solution works and what problems it aims to solve, which will be useful for very experienced AWS builders.\n\nBut to keep it simple for users who want the quickest path to creating runners of all sizes, it also has an \"easy button\" page with a table that looks like this:\n\n![Easy Button Page Sample](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image2.png)\nThe easy buttons launch a CloudFormation Quick Create that only requires filling in a few fields.\n{: .note.text-center}\n\nKeep in mind that easy buttons intentionally hide the high degree of customization that is possible with this automation by setting the parameters for the most common scenarios in advance. Advanced AWS users should read more of the documentation in the repository to understand that the GitLab Runner Vending Machine is also capable of creating sophisticated runner fleets.\n\nFirst, click the CloudFormation icons to launch the Easy Button template directly into the CloudFormation Quick Create console.
The Quick Create console is designed for simplicity to enable you to complete the prompts and then click one button to launch the stack.\n\n![CloudFormation Quick Create Example](https://about.gitlab.com/images/blogimages/hundred-runners/hundredrunners-image3.png){: .shadow.medium.center}\nThis is a typical Quick Create form for the GitLab Vending Machine easy buttons.\n{: .note.text-center}\n\nNext, select the deploy region by using the drop-down menu in the upper right of the console (where the screenshot says \"Oregon\").\n\nIn most cases, you will only need to add your GitLab instance URL (GitLab.com is fine if that is where your repositories are) and the runner token, which you retrieve from the group or project you wish to attach the runners to. If you are registering against a self-managed instance, you can use the instance-level tokens from the administrator console to register the runner for use across the entire instance. Read the [instructions for finding Runner Registration Tokens](https://docs.gitlab.com/runner/register/#requirements).\n\nA few other customization parameters are available for your convenience.\n\nNote that the automation attempts to use the default VPC of the region in which you deploy and the default security group for the VPC. In some organizations, default VPCs and/or their security groups are locked. You can deploy to custom VPCs by using the full template instead of an easy button. On the easy button page, look for the footnote \"Not any easy button person?\" to find a link to the full template.\n\nWatch the video below to see the provisioning of 100 GitLab Spot Runners on AWS in less than 10 minutes, in less than 10 clicks, for just $5 per hour.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/EW4RJv5zW4U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCheck out the YouTube playlist for more relevant videos about [GitLab and AWS](https://youtube.com/playlist?list=PL05JrBw4t0Ko30Bkf8bAvR-8E441Fy2G9).\n\n### This automation does much, much more\n\nWhile this article focused on how much you can save using Spot for scaled runners, the underlying automation is capable of many other scenarios. Below is a summary of the additional features and benefits covered in the documentation.\n\n- Scaled runners that are persistent (not Spot) ([see more easy buttons here](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/easybuttons.md)).\n- Supports small, single-runner setups and scaled ones.\n- Supports GitLab.com SaaS or self-managed instances.\n- Automates OS patching and Runner version upgrading.\n- Supports Windows and Linux.\n- Can be reused with Amazon provisioning services such as Service Catalog and Control Tower.\n- Implements least privilege security throughout.\n- Supports deregistering runners on scale-in or Spot termination.\n\nA full feature list is in the document [Features of GitLab HA Scaling Runner Vending Machine for AWS](https://gitlab.com/guided-explorations/aws/gitlab-runner-autoscaling-aws-asg/-/blob/main/FEATURES.md).\n\n### Easy running\n\nWe hope that this automation will make deployment of runners of all sizes simple for you.
We are open to your feedback, suggestions, and contributions in the GitLab project.\n",[832,937,894,771],{"slug":3602,"featured":6,"template":678},"100-runners-in-less-than-10mins-and-less-than-10-clicks","content:en-us:blog:100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","100 Runners In Less Than 10mins And Less Than 10 Clicks","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks.yml","en-us/blog/100-runners-in-less-than-10mins-and-less-than-10-clicks",{"_path":3608,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3609,"content":3615,"config":3621,"_id":3623,"_type":16,"title":3624,"_source":17,"_file":3625,"_stem":3626,"_extension":20},"/en-us/blog/understand-highly-technical-spaces",{"title":3610,"description":3611,"ogTitle":3610,"ogDescription":3611,"noIndex":6,"ogImage":3612,"ogUrl":3613,"ogSiteName":692,"ogType":693,"canonicalUrls":3613,"schema":3614},"How I use analogy to design for highly technical spaces","Just how much does a designer need to know about a technical space or product to design for it?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668041/Blog/Hero%20Images/Understand-Highly-Technical-Spaces.jpg","https://about.gitlab.com/blog/understand-highly-technical-spaces","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How I use analogy to design for highly technical spaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Camellia Yang\"}],\n        \"datePublished\": \"2021-08-13\",\n      }",{"title":3610,"description":3611,"authors":3616,"heroImage":3612,"date":3618,"body":3619,"category":14,"tags":3620},[3617],"Camellia Yang","2021-08-13","\n\nAs a designer, you're lucky when you get to design an application you're familiar with, such as a supermarket app or a travel website – something we might have already used or even use every day. Most of the time, we are tasked with designing an application or technology that's unfamiliar or highly technical. Sometimes, we may not know what the application we're designing is used for, like creating an interface for an MRI machine controlled by a doctor, or a dashboard used by a professional musician – knowing what all the buttons do is already an achievement.\n\nOne of the trickiest questions for designers is understanding exactly how much you need to know about how to use an application to design the best system for the user. This conundrum is typical for designers who work in a highly technical, enterprise space such as GitLab. The challenges can be exacerbated when working on security products. In my experience, we don't need to _fully understand_ the technology or space we are designing for, but we do need to have some idea of how it all works.\n\nThe most difficult part is deciding: How much knowledge is enough? How much do you need to know about a product to hold a conversation with users? Or be able to explain it to others in your own words?\n\nAll of those questions are reasonable criteria for designers to focus on, but I've found a more exciting strategy to motivate me to translate complex technical spaces into smart designs: Analogy.\n\n## Create analogies to aid the design process\n\nAs a designer, I like to focus on both my creative and analytical sides by thinking of scenarios that do not exist yet.
For instance, I like to do some thought experiments where I'll position myself as different types of users while performing tasks, or pretend I'm a user and critique my own work.\n\nIt may be easier to show how I do this through some examples. Below, I'll give security technologies some new clothes through easy-to-remember stories that match up to what the security technology does.\n\nLet's start by looking at some of the standard security technology that we offer on GitLab:\n\n**Static application security testing (SAST)**: A testing methodology that analyzes source code to find security vulnerabilities that make your organization's applications susceptible to attack.\n\n**Dynamic application security testing (DAST)**: A testing methodology that communicates with a web application through the web front-end to identify potential security vulnerabilities in the web application and architecture.\n\n**Fuzz testing**: An automated software testing technique that involves sending invalid, unexpected, or random data as inputs to a computer program in an attempt to get it to fail in some way.\n\nNow that we have an idea of the technologies in question, how might we understand them better through analogy?\n\nImagine a person is going to a hospital to check whether they're sick or not. Think of the SAST, DAST, and Fuzz testing technologies as different doctors with different specialties.\n\nSAST is a modern doctor who loves scanning. SAST can use an X-ray-like machine to see through the application's \"skin\". It can see if any bones are broken – and everything else that makes the application work. This is SAST's key advantage – it can see every detail of the scanned app and analyze it. It also has maps with predefined problems, so SAST can compare and find the problems. In some ways, the SAST maps are what [Gray's Anatomy](https://www.amazon.com/Grays-Anatomy-Anatomical-Clinical-Practice/dp/0702052302), the seminal medical school textbook, is to a doctor's clinical practice.\n\nDAST, on the other hand, is more like your primary care physician. DAST doesn't need to know all of the details about how everything is doing inside your body (or the application). Instead, DAST talks with the app by asking questions and then observing and analyzing the responses. If the response is strange, wrong, or there is no response, DAST knows there are potential problems.\n\nFuzz testing is the doctor that is a master of AI. Sometimes it also has a scanner like SAST, but it doesn't analyze in the same way. Fuzz testing has AI X-ray glasses that can mutate based on what it sees – potentially seeing even more. The analysis is the most personalized because of these mutated glasses: When the glasses see something suspicious in the shoulder area, they can change the lights and analyze it from the weirdest angle possible to match the individual shoulder. In other words, it adapts itself based on what was previously discovered and then digs deeper. Similarly, when Fuzz testing does not have a scanner, it has AI hearing, which allows it to change or mutate its questions based on the app's responses. It can possibly ask better questions as it scans to get more valuable answers to identify problems.\n\nI like to use people in my analogies because technology is so complicated, but few things are as complex as humans. Creating stories about things that I can relate to in my daily life makes them more accessible.\n\nI hope you've found these examples to be a fun way to conceptualize challenging and highly technical topics.
Next time you're designing in highly technical spaces, try building out relatable analogies and remove any fears of working in this space as a designer. One piece of advice: Always verify your analogies with professionals who have a deep understanding of the domain – no one will laugh at a passionate designer who tries to understand an unfamiliar world.\n",[1307,1144],{"slug":3622,"featured":6,"template":678},"understand-highly-technical-spaces","content:en-us:blog:understand-highly-technical-spaces.yml","Understand Highly Technical Spaces","en-us/blog/understand-highly-technical-spaces.yml","en-us/blog/understand-highly-technical-spaces",{"_path":3628,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3629,"content":3634,"config":3640,"_id":3642,"_type":16,"title":3643,"_source":17,"_file":3644,"_stem":3645,"_extension":20},"/en-us/blog/how-to-agentless-gitops-aws",{"title":3630,"description":3631,"ogTitle":3630,"ogDescription":3631,"noIndex":6,"ogImage":2478,"ogUrl":3632,"ogSiteName":692,"ogType":693,"canonicalUrls":3632,"schema":3633},"How to Use Push-Based GitOps with Terraform & AWS ECS/EC2","Learn how GitLab supports agentless approach for GitOps on AWS.","https://about.gitlab.com/blog/how-to-agentless-gitops-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a push-based approach for GitOps with Terraform and AWS ECS and EC2\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-08-10\",\n      }",{"title":3635,"description":3631,"authors":3636,"heroImage":2478,"date":3637,"body":3638,"category":14,"tags":3639},"How to use a push-based approach for GitOps with Terraform and AWS ECS and EC2",[1101],"2021-08-10","\n\nIn [part two of our GitOps series](/blog/how-to-agentless-gitops-vars/), we described how to use a push-based (or agentless) approach for [GitOps](/topics/gitops/) by using GitLab scripting capabilities as well as integrating infrastructure-as-code tools into GitOps pipelines. In this third blog post, we’ll also dig deep into how to use a push-based approach, but this time our focus will be on the integrations of Terraform, AWS ECS, and AWS EC2 in GitOps flows. This approach may be preferable when using infrastructure components that aren't Kubernetes, such as VMs, physical devices, and cloud-provider services.\n\nSimilar to Ansible – an agentless IT automation solution – Terraform can be leveraged by the scripting capabilities of GitLab to shape your infrastructure. GitLab also provides out-of-the-box integrations with Terraform, such as GitLab-managed Terraform state and Terraform plan reports in merge requests.\n\n## GitOps flows with GitLab and Terraform\n\nIn this section, we explain how to use GitLab and Terraform for a non-Kubernetes GitOps flow and Kubernetes GitOps.\n\n### GitLab and Terraform for non-K8s infrastructure\n\nGitLab leverages Terraform to provision a non-Kubernetes infrastructure component, namely a MySQL database running on AWS.\n\nNote: Ideally, the provisioning of a database should be an on-demand, self-service process that developers can just use. 
We use this scenario to illustrate a GitOps flow using a non-Kubernetes infrastructure component.\n\n#### How collaboration works in GitLab\n\nSasha, a developer, creates an issue and assigns it to Sidney, the database administrator, who then creates a Merge Request (MR) to start her work and invite collaboration with other stakeholders across the organization. Opening the MR automatically creates a feature branch for the GitLab project. Sidney uses Terraform to create an infrastructure-as-code configuration for the database, named `mysqlmain.tf`. The database happens to be an AWS RDS MySQL instance. The database Terraform configuration file should look like this:\n\n![Terraform configuration file for MySQL database](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/0-tf-mysqlmain-created.png){: .shadow.small.center.wrap-text}\nTerraform configuration file for MySQL database.\n{: .note.text-center}\n\nTake note of the version of the database (`engine_version`), the database storage (`allocated_storage`), and the embedded database admin user (`username`) and password in the image above.\n\nAs soon as Sidney adds the `mysqlmain.tf` file to the feature branch, a pipeline is automatically executed by GitLab in the MR. As part of the review process, a \"Terraform plan\" is executed against the Terraform files and the output is attached to the MR as an artifact:\n\n![Terraform plan output attached to Merge Request](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/1-tf-report-in-MR.png){: .shadow.small.center.wrap-text}\nTerraform plan output attached to MR.\n{: .note.text-center}\n\nIn the picture above, you can see the note \"1 Terraform report was generated in your pipelines\". You can click on the `View full log` button to see the output file of the \"Terraform plan\" command that was run against the new configuration file, as seen below:\n\n![Terraform plan output detailed log view](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/2-tf-plan-output.png){: .shadow.small.center.wrap-text}\nTerraform plan output detailed log view.\n{: .note.text-center}\n\nThe Terraform output shows that a database will be created once this configuration file is applied to the infrastructure. The artifacts attached to an MR provide information that can help stakeholders review the proposed changes. The Terraform output in the MR fosters collaboration between stakeholders, leads to infrastructure that is more consistent, resilient, reliable, and stable, and helps prevent unscheduled outages.\n\nIn the image below, we see how reviewers can collaborate in GitLab. The screenshot shows that the original requester, Sasha, notices that a database storage of 5 GB is too small, so she makes an inline suggestion to increase the database storage capacity to 10 GB.\n\n![Inline suggestion to increase database storage to 10GB](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/3-tf-inline-suggestion-by-Sasha.png){: .shadow.small.center.wrap-text}\nInline suggestion to increase database storage to 10GB.\n{: .note.text-center}\n\nInline suggestions foster collaboration and help increase developer productivity, since suggested changes can be added with the click of a button.\n\nNext, Sidney invites DevOps engineer Devon to collaborate on the MR. Devon notices that the database version in the configuration file is not the latest one.
He proceeds to make an inline suggestion proposing a more up-to-date version for Sidney to review:\n\n![Inline suggestion to update database version](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/4-tf-inline-suggestion-by-Devon.png){: .shadow.small.center.wrap-text}\nInline suggestion to update database version.\n{: .note.text-center}\n\nSidney can monitor the discussion between code reviewers on the MR by tracking the number of unresolved threads. So far, there are four unresolved threads:\n\n![Number of unresolved threads displayed at the top of the MR](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/5-tf-unresolved-threads-for-Sidney.png){: .shadow.small.center.wrap-text}\nNumber of unresolved threads displayed at the top of the MR.\n{: .note.text-center}\n\nSidney starts resolving the threads by following the convenient thread navigation provided by GitLab, which makes it easy for her to process each of the proposed review items. Sidney just needs to click \"Apply suggestion\" to accept input from a reviewer:\n\n![Applying a suggestion with a single button click](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/6-tf-apply-inline-suggestion-by-Sidney.png){: .shadow.small.center.wrap-text}\nApplying a suggestion with one click.\n{: .note.text-center}\n\nIn his inline review, Devon also suggested replacing the embedded database admin username and password with parameters, so Sidney replaces the embedded values with variables. The variable values will be managed by masked variables within GitLab:\n\n![Parameterizing variables in Terraform configuration file](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/7-tf-parameterizing-vars-by-Sidney.png){: .shadow.small.center.wrap-text}\nParameterizing variables in Terraform configuration file.\n{: .note.text-center}\n\nOnce the threads are resolved and the stakeholders involved in the MR finish collaborating, it's time to merge.\n\nLearn more about how GitLab fosters collaboration using the principles of GitOps in the video below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/onFpj_wvbLM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\nIn this next example, Sasha is the one merging the MR:\n\n![Merge Request with infrastructure updates being merged](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/8-tf-MR-merged.png){: .shadow.small.center.wrap-text}\nMR with infrastructure updates being merged.\n{: .note.text-center}\n\nMerging automatically launches a pipeline that will apply the changes to the infrastructure:\n\n![GitOps pipeline completed execution](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/9-tf-pipeline-complete.png){: .shadow.small.center.wrap-text}\nGitOps pipeline completed execution.\n{: .note.text-center}\n\n#### CI/CD with non-K8s infrastructure\n\nThe CI/CD pipeline in the previous example works by validating the infrastructure configuration files. Then the pipeline validates the proposed updates against the current state of the infrastructure. Finally, it applies the updates to the production infrastructure.
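A minimal sketch of this validate/plan/apply shape in `.gitlab-ci.yml` terms (an illustrative assumption on our part – the job names, image, and version pin are hypothetical, not the demo's exact pipeline):\n\n```\nstages:\n  - validate\n  - plan\n  - apply\n\nimage:\n  name: hashicorp/terraform:1.0.0  # hypothetical pinned version\n  entrypoint: [\"\"]\n\nbefore_script:\n  - terraform init\n\nvalidate:\n  stage: validate\n  script:\n    - terraform validate\n\nplan:\n  stage: plan\n  script:\n    - terraform plan -out=plan.cache\n  artifacts:\n    paths:\n      - plan.cache\n\napply:\n  stage: apply\n  script:\n    - terraform apply plan.cache  # a saved plan applies without prompting\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\"  # apply only after merging to the default branch\n```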
\n\nRunning this GitOps flow results in a brand new MySQL database on AWS RDS:\n\n![A new MySQL database has been created via a GitOps flow](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/10-db-ready.png){: .shadow.small.center.wrap-text}\nA new MySQL database has been created via a GitOps flow.\n{: .note.text-center}\n\nBy checking the details of the new MySQL database, you can corroborate that the database storage is 10 GB and that the database version is the most current:\n\n![Resulting MySQL database configuration from the collaboration of stakeholders](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/11-db-version-and-10g-storage.png){: .shadow.small.center.wrap-text}\nThe MySQL database configuration built by team member collaboration.\n{: .note.text-center}\n\nIn the next section, we look at how a similar GitOps flow can be applied to a Kubernetes cluster.\n\n### GitLab and Terraform for K8s infrastructure\n\nWe skip past all the collaboration steps to focus on a change to the EKS cluster Terraform configuration file. In the picture below, a user is changing the minimum size of the autoscaling group of the EKS cluster from one to two:\n\n![Raising autoscaling group minimum to 2](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/12-worker-nodes-to-two.png){: .shadow.small.center.wrap-text}\nIncreasing autoscaling group minimum to two.\n{: .note.text-center}\n\nWhen the stakeholder commits the change in the MR, a CI/CD pipeline validates the configuration, performs a plan against production, and applies the updates to the production infrastructure. After the pipeline finishes, the user can log into the Amazon EC2 console to verify that the EKS cluster now has a minimum of two nodes in its autoscaling group:\n\n![GitOps flow modified the number of worker nodes in K8s cluster](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/13-two-worker-nodes-on-AWS.png){: .shadow.small.center.wrap-text}\nGitOps flow modified the number of worker nodes in K8s cluster.\n{: .note.text-center}\n\nSee this scenario in action by watching the [GitOps presentation](/topics/gitops/gitops-multicloud-deployments-gitlab/) on our GitOps topics page.\n\n## GitOps flows for non-K8s (like ECS, EC2)\n\nGitLab also provides Auto Deploy capabilities to streamline application deployment to ECS and EC2, so you can shape infrastructure as desired.\n\n### Deploying to Amazon ECS\n\nAfter creating your ECS cluster, GitLab can deliver your application and its infrastructure to the cluster by including the ECS deployment template in your `.gitlab-ci.yml`, using CI/CD.\n\n```yaml\ninclude:\n  - template: AWS/Deploy-ECS.gitlab-ci.yml\n```\n\nNext, create the `ECS Task Definition` file in your project that specifies your app's infrastructure requirements, along with other details.\n\n![ECS Task Definition file snippet](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/14-ECS-taskdef-file.png){: .shadow.small.center.wrap-text}\nECS Task Definition file snippet.\n{: .note.text-center}\n\nFinally, define the project variables that drive the template:\n\n![Project variables required to auto-deploy to ECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/15-ECS-related-vars.png){: .shadow.small.center.wrap-text}\nProject variables required to auto-deploy to ECS.\n{: .note.text-center}
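\n\nAs a rough sketch, those variables look like the following. The names follow GitLab's cloud deployment documentation (linked below), but treat them as assumptions and verify them against the docs for your GitLab version; the values are placeholders.\n\n```yaml\n# Illustrative project variables for the ECS deployment template (placeholder values)\nvariables:\n  CI_AWS_ECS_CLUSTER: 'my-cluster'\n  CI_AWS_ECS_SERVICE: 'my-service'\n  CI_AWS_ECS_TASK_DEFINITION: 'my-task-definition'\n```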
\n\nThe ECS deployment template does the rest, including support for review pipelines.\n\n![Review pipeline in GitOps flow](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/16-ECS-review-pipeline.png){: .shadow.small.center.wrap-text}\nReview pipeline in GitOps flow.\n{: .note.text-center}\n\nIn the review pipeline above, stakeholders can review the proposed changes before sending them to production. The two screenshots below show different aspects of the proposed changes in the log output of the `review_fargate` job:\n\n![Configuring load balancers in ECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/17-review-fargate-log-begin.png){: .shadow.small.center.wrap-text}\nConfiguring load balancers in ECS.\n{: .note.text-center}\n\nSee the configuration for infrastructure components like load balancers in the image above. The image below shows infrastructure components like subnets, security groups, and the assignment of a public IP address:\n\n![Configuring subnets, security groups in ECS](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/18-review-fargate-log-middle.png){: .shadow.small.center.wrap-text}\nConfiguring subnets and security groups in ECS.\n{: .note.text-center}\n\nOnce all stakeholders are done collaborating on a proposed change to the production infrastructure, the updates are applied using a CI/CD pipeline. Below is an example of this type of pipeline:\n\n![Applying infrastructure updates to production](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/19-ECS-prod-pipeline.png){: .shadow.small.center.wrap-text}\nApplying infrastructure updates to production.\n{: .note.text-center}\n\nRead our documentation to learn more about [how GitLab users can Auto Deploy to ECS](https://docs.gitlab.com/ee/ci/cloud_deployment/#deploy-your-application-to-the-aws-elastic-container-service-ecs).\n\n### Deploying to Amazon EC2\n\nGitLab also provides a built-in template to provision infrastructure and deploy your applications to EC2 as part of Auto DevOps. The template:\n\n- Provisions infrastructure using AWS CloudFormation\n- Pushes the application to S3\n- Deploys your application from S3 to EC2\n\nEach of these steps requires a JSON configuration file. 
Below is an example of a portion of a CloudFormation Stack JSON file used to create your infrastructure:\n\n![CloudFormation stack JSON snippet](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/20-EC2-portion-stack-file.png){: .shadow.small.center.wrap-text}\nCloudFormation stack JSON snippet.\n{: .note.text-center}\n\nThe JSON used by the Auto Deploy template to push your application to S3 would look similar to this:\n\n![JSON to push application to S3](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/21-EC2-push-file.png){: .shadow.small.center.wrap-text}\nJSON to push application to S3.\n{: .note.text-center}\n\nAnd the file used for the actual deployment of your application from S3 to EC2 would look like the following:\n\n![JSON to deploy application to EC2](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-aws/22-EC2-deploy-file.png){: .shadow.small.center.wrap-text}\nJSON to deploy application to EC2.\n{: .note.text-center}\n\nAfter creating these files, you need to create the following variables in your project - displayed here with some sample values:\n\n```yaml\nvariables:\n  CI_AWS_CF_CREATE_STACK_FILE: 'aws/cf_create_stack.json'\n  CI_AWS_S3_PUSH_FILE: 'aws/s3_push.json'\n  CI_AWS_EC2_DEPLOYMENT_FILE: 'aws/create_deployment.json'\n  CI_AWS_CF_STACK_NAME: 'YourStackName'\n```\n\nThe last step is to include the template in your `.gitlab-ci.yml` file:\n\n```yaml\ninclude:\n  - template: AWS/CF-Provision-and-Deploy-EC2.gitlab-ci.yml\n```\n\nMore details on [how GitLab uses Auto Deploy to EC2 are available in the documentation](https://docs.gitlab.com/ee/ci/cloud_deployment/#provision-and-deploy-to-your-aws-elastic-compute-cloud-ec2).\n\n## Agent or agentless: GitLab has your GitOps flows covered\n\nWhether your situation calls for an agent-based/pull approach to doing GitOps, or for an agentless/push approach, GitLab has your back. GitLab offers the flexibility to choose the approach to GitOps that best fits your specific projects or applications. 
GitLab also supports many types of infrastructure – from physical components and virtual machines to Kubernetes and containers – as well as infrastructure-as-code tools like Terraform, Ansible, and AWS CloudFormation.\n",[535,894,2932],{"slug":3641,"featured":6,"template":678},"how-to-agentless-gitops-aws","content:en-us:blog:how-to-agentless-gitops-aws.yml","How To Agentless Gitops Aws","en-us/blog/how-to-agentless-gitops-aws.yml","en-us/blog/how-to-agentless-gitops-aws",{"_path":3647,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3648,"content":3653,"config":3660,"_id":3662,"_type":16,"title":3663,"_source":17,"_file":3664,"_stem":3665,"_extension":20},"/en-us/blog/achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor",{"title":3649,"description":3650,"ogTitle":3649,"ogDescription":3650,"noIndex":6,"ogImage":1579,"ogUrl":3651,"ogSiteName":692,"ogType":693,"canonicalUrls":3651,"schema":3652},"GitLab on Graviton2: 23% cheaper, 36% higher performance","GitLab and GitLab Runner Performance Gains on Arm based AWS Graviton2","https://about.gitlab.com/blog/achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"23% Cost savings and 36% performance gain by deploying GitLab on Arm-based AWS Graviton2\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pranay Bakre\"}],\n        \"datePublished\": \"2021-08-05\",\n      }",{"title":3654,"description":3650,"authors":3655,"heroImage":1579,"date":3657,"body":3658,"category":14,"tags":3659},"23% Cost savings and 36% performance gain by deploying GitLab on Arm-based AWS Graviton2",[3656],"Pranay Bakre","2021-08-05","\n\nCompanies in all industries and sectors have significantly invested in digital transformation and increased their software development capabilities. GitLab delivers modern DevOps with a complete DevOps platform. However, some organizations require self-managed GitLab and GitLab Runners, which creates added costs for hosting and running GitLab infrastructure. Our latest cost analysis and performance benchmarks show that customers can realize cost savings of up to 23% and performance gains of up to 36% by deploying the GitLab application and GitLab Runner on the Arm-based Graviton2 when compared to the x86-based M5/C5 EC2 instances.\n\n[Arm](https://www.arm.com/) is a leading provider of silicon intellectual property (IP) for intelligent systems-on-chip (SoC) that power billions of devices. [GitLab and Arm have collaborated](/blog/gitlab-arm-aws-graviton2-solution/) closely to make GitLab tools available for devices based on Arm architecture. AWS is the first major public cloud provider to offer Arm-based EC2 compute instances powered by Graviton2 processors built upon Arm Neoverse N1 IP cores.\n\n## Performance benchmarks for GitLab 10,000 reference architecture\n\nGitLab is a highly scalable and modular application and can accommodate from 10 users to 10,000 as a business scales. Today, the [GitLab 10,000 reference architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html) provides users with a blueprint for hosting GitLab on x86_64-backed compute on leading cloud platform providers. 
Building upon our collaboration from last year, the next step was to include Arm64-backed compute in the reference architecture.\n\nFor the research, we first ran the performance benchmarks comparing the cost of hosting GitLab's [Reference Architecture](https://docs.gitlab.com/ee/administration/reference_architectures/) for up to 10,000 users on Arm64 and x86 environments on AWS. We found that GitLab customers can realize up to 23% cost savings on their AWS bill by deploying GitLab on Graviton2-based EC2 instances over comparable x86-based EC2 instances for about the same level of performance. See the monthly AWS cost for running this scenario on the [Arm64 environment](https://calculator.s3.amazonaws.com/index.html#r=IAD&s=EC2&key=files/calc-4f854bec29723ed3fa0f209ca0fddf3495447e8f&v=ver20210322c7) and [x86 environment](https://calculator.s3.amazonaws.com/index.html#r=IAD&s=EC2&key=files/calc-8c66ad5bfb008a1f0f21c779fcc336418ae1e83a&v=ver20210322c7).\n\nThe figure below shows the components that make up the GitLab 10,000 reference architecture:\n\n![GitLab architecture](https://about.gitlab.com/images/blogimages/gitlab_arch.png){: .shadow.medium.center}\nAn example of components that make up a 10,000 user GitLab architecture.\n{: .note.text-center}\n\nRead more about the [components required to set up the 10,000 architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html).\n\nFor testing, we used the [GitLab Performance Tool](https://gitlab.com/gitlab-org/quality/performance) (GPT), developed in-house by GitLab to test the performance of GitLab. Below is a high-level view of the different kinds of tests generated by GPT.\n\n![GPT tests](https://about.gitlab.com/images/blogimages/gpt_tests.png){: .shadow.medium.center}\nThe different types of tests created by the GitLab Performance Tool.\n{: .note.text-center}\n\nAll data was generated under a group named `gpt` and split into two areas: vertical and horizontal. The vertical area consists of one or more large projects that are considered a good and representative size for performance testing. The horizontal area consists of a large number of subgroups that have a large number of projects. All of these subgroups are saved under the parent subgroup `gpt/many_groups_and_projects`.\n\nWe also used the GitLab Environment Toolkit (GET), a provisioning and configuration toolkit, for deploying GitLab's Reference Architectures with Terraform and Ansible.\n\n## About performance benchmarking for the self-managed GitLab Runner\n\nGitLab Runner is the open source application that runs GitLab CI/CD jobs on various computing platforms and operating systems. The GitLab Runner has supported Arm architecture since [GitLab 12.6](/releases/2019/12/22/gitlab-12-6-released/), which allows users to run CI/CD jobs natively on Arm.\n\nWe generated performance benchmark results for the GitLab Runner by compiling a standard Linux kernel on M6g and M5 instances. In this case, we demonstrated a 36% performance gain on M6g instances under 100% CPU utilization. For example, it took 7 minutes and 53 seconds to compile the Linux kernel on M5.xlarge (4 core) instances, whereas it took only 5 minutes and 47 seconds on M6g.xlarge (4 core) instances.
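\n\nTo route a job like this to an Arm-based runner, a job can target runner tags in `.gitlab-ci.yml`. A minimal sketch, assuming a runner registered on a Graviton2 instance with an `arm64` tag (the tag name is an assumption, not a convention GitLab enforces, and the script is illustrative):\n\n```yaml\ncompile-kernel:\n  tags: [arm64]            # route the job to the Arm-tagged runner\n  script:\n    - uname -m             # prints aarch64 on a Graviton2 instance\n    - make -j\"$(nproc)\"    # compile using all available cores\n```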
\n\nThe figure below shows the architecture of the test setup for the GitLab Runner that we used to benchmark performance, based on the [GitLab Runner stress test repository](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-stress).\n\n![GitLab Runner test configuration](https://about.gitlab.com/images/blogimages/gl_runner_test_config.JPG){: .shadow.medium.center}\nThe architecture of the GitLab Runner used to benchmark performance.\n{: .note.text-center}\n\nWe used Prometheus and Grafana to obtain the CPU utilization graphs for both M6g and M5 instances from the Runner. The diagrams below show that we have 100% CPU utilization on both Arm and x86 environments, and we are still able to achieve a 36% performance gain with the GitLab Runners on Arm-based M6g instances.\n\n![Runner on ARM - CPU utilization](https://about.gitlab.com/images/blogimages/runner_arm_cpu_perf_1.png){: .shadow.medium.center}\nCPU utilization on the Arm environment.\n{: .note.text-center}\n\n![Runner on x86-64 - CPU utilization](https://about.gitlab.com/images/blogimages/runner_x86_cpu_perf_1.png){: .shadow.medium.center}\nCPU utilization on the x86 environment.\n{: .note.text-center}\n\nUsers can benefit from the 36% performance gain for CI job execution and roughly 23% per month in cost savings for executing CI jobs. The savings can be significant for customers that consume about 500,000 CI compute minutes per month.\n\n## GitLab customers increase performance and decrease cost by moving to Arm\n\nGitLab enterprise customers can gain 36% in performance improvements and 23% cost savings by deploying GitLab and GitLab Runner on AWS Graviton2-based EC2 instances. If your company's cloud infrastructure is on AWS, then you should consider whether moving workloads to Arm-based Graviton2 instances is suitable for your organization.\n\nCheck out this [repository for resources for getting started with AWS Graviton processors](https://github.com/aws/aws-graviton-getting-started) and information on supported operating systems and software. 
Feel free to open an [issue](https://github.com/aws/aws-graviton-getting-started/issues) if you have questions or need more help.\n\n_Join us at [Arm DevSummit 2021](https://devsummit.arm.com/en) to learn more about GitLab performance benchmarking and other topics._\n",[832],{"slug":3661,"featured":6,"template":678},"achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor","content:en-us:blog:achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor.yml","Achieving 23 Cost Savings And 36 Performance Gain Using Gitlab And Gitlab Runner On Arm Neoverse Based Aws Graviton2 Processor","en-us/blog/achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor.yml","en-us/blog/achieving-23-cost-savings-and-36-performance-gain-using-gitlab-and-gitlab-runner-on-arm-neoverse-based-aws-graviton2-processor",{"_path":3667,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3668,"content":3674,"config":3680,"_id":3682,"_type":16,"title":3683,"_source":17,"_file":3684,"_stem":3685,"_extension":20},"/en-us/blog/ubs-gitlab-devops-platform",{"title":3669,"description":3670,"ogTitle":3669,"ogDescription":3670,"noIndex":6,"ogImage":3671,"ogUrl":3672,"ogSiteName":692,"ogType":693,"canonicalUrls":3672,"schema":3673},"How UBS created their own DevOps platform using GitLab","How GitLab helped power more than a million builds in six months on UBS DevCloud.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665839/Blog/Hero%20Images/devops.png","https://about.gitlab.com/blog/ubs-gitlab-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How UBS created their own DevOps platform using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-08-04\",\n      }",{"title":3669,"description":3670,"authors":3675,"heroImage":3671,"date":3677,"body":3678,"category":14,"tags":3679},[3676],"Sara Kassabian","2021-08-04","\n\nUBS, the largest truly global wealth manager, uses GitLab to power DevCloud, a single [DevOps platform](/solutions/devops-platform/) that allows for a cloud-based, service-oriented software development lifecycle.\n\n\"GitLab is a fundamental part of DevCloud,\" said [Rick Carey](https://www.bloomberg.com/profile/person/20946258), Group Chief Technology Officer at UBS. \"We wouldn't be able to have that seamless experience without GitLab. It allowed us to pull ahead of many of our competitors, and break down the barriers between coding, testing, and deployment.\"\n\nDuring GitLab Virtual Commit 2021, Rick and [Eric Johnson](/company/team/#edjdev), Chief Technology Officer at GitLab, talked about how building DevCloud on GitLab's DevOps Platform allowed UBS to increase their development velocity, lower their infrastructure costs, and increase collaboration between engineers and non-engineering teams worldwide.\n\n## How engineers used DevCloud to collaborate during UBS Hackathon\n\nThe annual [UBS Hackathon](https://www.ubs.com/global/en/our-firm/what-we-do/technology/2020/hackathon-2020.html), which typically brings together engineers from around the world in one room, went virtual in 2020 due to the COVID-19 pandemic. 
UBS did a soft launch of the DevCloud platform during the 2020 Hackathon to give the more than 500 participants dispersed worldwide a truly global and seamless team development experience.\n\n\"It was hard to pick a winner, because nearly every program and team built something absolutely incredible in such a short amount of time,\" said Rick. \"They got so much done that even while chatting with each other, they said, 'I can't believe how easy it is to get this done.'\n\n\"Once this Hackathon was successful, we knew that we were going to be able to migrate the rest of our engineers to DevCloud.\"\n\n## Open source collaboration benefited UBS and GitLab\n\n\"I must say it's uncommon in my experience to see such a large organization, let alone one in such a compliance-driven industry as finance, take on such a large project and deliver it on time,\" Eric said.\n\nRick attributes part of that success to GitLab's commitment to open source collaboration, which allowed UBS to turn to GitLab team members with questions.\n\n\"In an open source model, every time there was a gap, or an issue, or something we just needed your help with, we could reach out to GitLab and say, 'Can we work on this together? Is there a way to improve this?'\", said Rick. \"That's the value, and that's one of the reasons we went with GitLab.\"\n\nIt wasn't a one-way relationship. Eric said that GitLab learned a lot about compliance and risk processes that are unique to the financial sector by collaborating on open source projects with UBS.\n\n\"Collaboration is one of GitLab's core values – which was key to this project. We set common goals. We're in constant communication, and we're always working together to remove roadblocks. Working with UBS's engineers is a truly agile experience,\" said Eric.\n\nGitLab forums have a lot of contributions from UBS team members, and both UBS and GitLab are members of open source communities such as the Fintech Open Source Foundation (FINOS) and Cloud Native Computing Foundation (CNCF).\n\n## How adopting DevCloud paid off for UBS\n\nOne of the key messages for why adopting a single DevOps platform such as GitLab or DevCloud benefits engineering teams is the productivity pay-off – for engineers and non-engineers alike.\n\nSimilar to GitLab, which enables simple asynchronous collaboration between team members, DevCloud was built with engineers in mind, but so that everyone can contribute. Rick said that one of the best pieces of feedback he got on DevCloud was from someone on the business side of UBS, who wanted to do some development projects but struggled with other tools.\n\n\"He said, 'Oh, that's DevCloud? I love DevCloud,'\" said Rick.\n\nIn the roughly six months since UBS launched DevCloud, there have been more than 12,000 users and more than one million successful builds.\n\n## What's next?\n\nIn June 2021, [GitLab acquired machine learning company UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html), which has allowed us to improve our machine learning capabilities as part of our DevOps Platform. Eric said that by practicing applied machine learning, specifically for code review, GitLab should be able to balance review workloads across teams to increase efficiency.\n\nKeeping all the DevOps activities in a single application makes it easier to extract insights throughout the software development lifecycle. 
By adding machine learning to a DevOps Platform such as GitLab or DevCloud, teams can not only derive data from past activities, but start to predict the future.\n\n\"We were very impressed by UBS's development culture,\" said Eric. \"It is very complementary to our own, and we look forward to our continued partnership.\"\n\n## More of a video person?\n\nThis conversation was part of GitLab Virtual Commit 2021. Watch the video below to see the full conversation between Eric and Rick.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Tof-7fDultw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[894,703,832,1347],{"slug":3681,"featured":6,"template":678},"ubs-gitlab-devops-platform","content:en-us:blog:ubs-gitlab-devops-platform.yml","Ubs Gitlab Devops Platform","en-us/blog/ubs-gitlab-devops-platform.yml","en-us/blog/ubs-gitlab-devops-platform",{"_path":3687,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3688,"content":3694,"config":3700,"_id":3702,"_type":16,"title":3703,"_source":17,"_file":3704,"_stem":3705,"_extension":20},"/en-us/blog/velocity-with-confidence",{"title":3689,"description":3690,"ogTitle":3689,"ogDescription":3690,"noIndex":6,"ogImage":3691,"ogUrl":3692,"ogSiteName":692,"ogType":693,"canonicalUrls":3692,"schema":3693},"How GitLab 14 satisfies the need for speed with modern DevOps","GitLab 14: Ship with velocity, ship with confidence","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682089/Blog/Hero%20Images/racecar_devops.jpg","https://about.gitlab.com/blog/velocity-with-confidence","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab 14 satisfies the need for speed with modern DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2021-07-29\",\n      }",{"title":3689,"description":3690,"authors":3695,"heroImage":3691,"date":3697,"body":3698,"category":14,"tags":3699},[3696],"Parker Ennis","2021-07-29","\n\n## How DevOps and NFS changed the game\n\nWhat if I told you that one of the best-selling racing video game franchises of all time, the \"Need For Speed\" (NFS), and DevOps have more in common than you think? Yes, you read that correctly – probably not the NFS (Network File System) you were expecting.\n\n### An appetite for change\n\nFor context, the NFS series originally set out to redefine a saturated, yet unsophisticated, racing video game market. Motivated by an appetite for change, the NFS user experience reflected the human connection to real cars and how they behaved, which was a big challenge for developers in the 1990s. Nearly 30 years ago, \"The Need for Speed\" forever changed the landscape of racing games, and the franchise has sold 150 million copies since its debut.\n\n![The original Need For Speed game from 1994](https://about.gitlab.com/images/blogimages/need_for_speed.png){: .shadow.center}\nThe original Need For Speed video game set a new standard with an appetite for industry change.\n{: .note.text-center}\n\nCoincidentally, it was in 1994 that Grady Booch coined the term \"continuous integration\" (CI). Booch, like NFS, paved the way for immense industry growth in the realm of software development. 
CI aimed to redefine the manual, time-consuming development processes that paid little mind to how real humans and developers behaved and collaborated around application development by [leveraging automation to increase development speed without sacrificing quality](/topics/ci-cd/benefits-continuous-integration/).\n\nSimilar to how NFS took the racing scene by storm and laid the groundwork for the racing game genre, CI evolved into what is arguably the most important piece of DevOps best practices today: Continuous integration and continuous delivery (CI/CD).\n\nDevOps continues to evolve, but without CI/CD, DevOps isn't the collaborative practice that helps teams work faster and more efficiently. CI/CD is a superpower within DevOps – unlocking the potential to ship apps with increased velocity and confidence in their quality, without having to choose one or the other.\n\n### DIY DevOps vs Modern DevOps\n\nToday, it doesn't matter what your business does: it's going to involve some amount of using and building software. DevOps gained traction in the age of digital transformation, where the rate of technical innovation acted as a forcing function for companies to fail or survive. Over the past 10 years or so, organizations had a choice to either embrace this \"need for speed\" and adopt DevOps practices, or be displaced by their competition.\n\nThis scramble led to a \"DIY\" style of DevOps that couldn't deliver on its promises much of the time. For many organizations, the biggest problem wasn't just the brittle toolchains composed of disparate pieces of software, but also trying to make these complicated toolchains and processes benefit from DevOps. Since uprooting everything wasn't an option, the root of the problem was still there, and DevOps was hard to adopt.\n\nFor all the teams DevOps has helped, the DevOps marketplace must continuously improve and evolve as we learn more about the challenges of modernizing workflows. DevOps must modernize alongside businesses to ensure it's an accessible and realistic framework for as many companies as possible to leverage.\n\n### GitLab 14 fuels the modern DevOps need for speed\n\nWith a platform-driven approach, [GitLab 14](/releases/2021/06/22/gitlab-14-0-released/) delivers a consistent and efficient developer and operator experience that leads to a simplified and more predictable SDLC. A single user interface, embedded security, and a unified data store are just some of the features of a platform any company can use without the tradeoffs of the DIY DevOps past. By using one tool for source code management, CI, and CD, teams are more efficient and productive with streamlined collaboration. Engineers are happier when focused on value-add than when maintaining integrations – and happy developers help attract and retain talent.\n\n[GitLab 14](/gitlab-14/) ushers in a new era of modern DevOps as a global movement, and I'm excited to talk a little bit about some of its capabilities that help you ship software faster, with a higher degree of confidence, and improve your ability to respond to market changes.\n\n### Ship with velocity and confidence\n\n**1. [GitLab pipeline editor](/releases/2021/01/22/gitlab-13-8-released/#pipeline-editor)**\n\nCrafting pipelines can be complicated and verbose without an understanding of advanced pipeline syntax and how it fits within the workflow using the `.gitlab-ci.yml` configuration file.
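\n\nEven a basic pipeline already involves stages, jobs, and YAML structure – the kind of file a newcomer would author and lint in the editor. A minimal sketch (job names and scripts are illustrative):\n\n```yaml\nstages: [build, test]\n\nbuild-job:\n  stage: build\n  script:\n    - echo \"Compiling the application...\"\n\ntest-job:\n  stage: test\n  script:\n    - echo \"Running tests...\"\n```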
\n\nNeeding to craft pipelines from scratch presents a steeper learning curve for organizations and teams with a less mature DevOps culture. The GitLab pipeline editor lowers the barrier to entry for CI/CD novices and accelerates power users with visual authoring and versioning, continuous validation, and pipeline visualization. Whether you're a more advanced user or a novice, the pipeline editor unlocks additional power and usability.\n\n![Pipeline editor linting capability makes pipeline authoring easier](https://about.gitlab.com/images/blogimages/lint_ci.png){: .shadow.center}\nPipeline editor linting capability makes pipeline authoring easier and more efficient.\n{: .note.text-center}\n\nHere's what some of our wider community is saying about the pipeline editor:\n\n> \"I really like the direction of making CI/CD more accessible to first-time users and how GitLab rolls out this feature piece by piece.\" - Bernhard Knasmüller, computer scientist\n\n> \"This is going to improve the CI/CD configuration experience greatly!\" - Olivier Jourdan, developer\n\n**2. [GitLab Agent for Kubernetes](https://youtu.be/17O_ARVaRGo)**\n\nThe GitLab Agent for Kubernetes enables secure, cloud-native [GitOps](/solutions/gitops/). GitLab also meets customers where they are by supporting GitOps with agent-based and agentless approaches, and for deployments anywhere, regardless of whether infrastructure is cloud-native. It also enables alerts based on network policies for pull-based deployments.\n\nHere's a piece of feedback from the wider GitLab community on the Agent:\n\n> \"GitLab is leading the evolution of DevOps by optimising work efficiency and cloud-native integration capabilities. This enables the rapid delivery of digital value.\" - Vasanth Kandaswamy, Head of Data and Applications Portfolio, Fujitsu Australia\n\nWe look forward to iterating and improving these capabilities and always [welcome your feedback](/submit-feedback/#product-feedback) on our product.\n\n### What's next?\n\nOne thing is for sure: **people want to go fast,** but not when it requires sacrificing peace of mind and quality. We're committed to helping you ship with velocity and confidence by [investing in specific product areas](/direction/#fy22-product-investment-themes) to bring the benefits of modern DevOps to anyone using GitLab to deliver their applications.\n\n![Go fast with confidence](https://about.gitlab.com/images/blogimages/gofast.gif){: .shadow.center}\nEven Ricky Bobby from Talladega Nights agrees. People just want to go fast!\n{: .note.text-center}\n\nWe'll continue executing on our [vision for CI/CD](https://gitlab.com/groups/gitlab-org/-/epics/4534) to create a visual pipeline authoring experience built right into GitLab that simplifies the complexity, letting you quickly create and edit pipelines while still exposing advanced options when you need them.\n\nWe're also committed to making sure you can deploy anytime and anywhere to take advantage of the benefits of Kubernetes, no matter where you are at on your cloud native development journey. If you have feedback or suggestions on what we can do better, please [let us know in our product epic](https://gitlab.com/groups/gitlab-org/-/epics/3329).\n\nWe look forward to delivering you more value as we iterate upon this new era of GitLab 14 going forward and can't wait to see the great things you're creating with GitLab.\n\n_This blog is part three in a three-part series on the top capabilities of GitLab 14. 
Learn more about [how GitLab 14 prepares you for DevSecOps 2.0 in part one](/blog/are-you-ready-for-the-newest-era-of-devsecops/), and about [how to optimize DevOps with GitLab 14's enhanced visibility tools in part two](/blog/optimizing-devops-visibility-in-gitlab-14/)._\n\nCover image by [CHUTTERSNAP](https://unsplash.com/@chuttersnapk) on [Unsplash](https://unsplash.com/photos/5Yo1P9ErikM)\n{: .note}\n",[894,915,832,937,535],{"slug":3701,"featured":6,"template":678},"velocity-with-confidence","content:en-us:blog:velocity-with-confidence.yml","Velocity With Confidence","en-us/blog/velocity-with-confidence.yml","en-us/blog/velocity-with-confidence",{"_path":3707,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3708,"content":3714,"config":3720,"_id":3722,"_type":16,"title":3723,"_source":17,"_file":3724,"_stem":3725,"_extension":20},"/en-us/blog/secure-container-images-with-gitlab-and-grype",{"title":3709,"description":3710,"ogTitle":3709,"ogDescription":3710,"noIndex":6,"ogImage":3711,"ogUrl":3712,"ogSiteName":692,"ogType":693,"canonicalUrls":3712,"schema":3713},"How to secure your container images with GitLab and Grype","Learn how to start detecting vulnerabilities in your container images in just a few steps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671873/Blog/Hero%20Images/logos_header.jpg","https://about.gitlab.com/blog/secure-container-images-with-gitlab-and-grype","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to secure your container images with GitLab and Grype\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dan Luhring\"}],\n        \"datePublished\": \"2021-07-28\",\n      }",{"title":3709,"description":3710,"authors":3715,"heroImage":3711,"date":3717,"body":3718,"category":14,"tags":3719},[3716],"Dan Luhring","2021-07-28","> Support for the Grype scanner in the GitLab Container Scanning analyzer is being deprecated in GitLab 16.9 and will be removed in GitLab 17.0. Users are advised to use the default setting for `CS_ANALYZER_IMAGE`, which uses the Trivy scanner. Users who desire to continue using Grype can use the [Security Scanner Integration\ndocumentation](https://docs.gitlab.com/ee/development/integrations/secure.html) to create their own integration with GitLab.\n\n## The importance of container image security\n\nThanks to containers, what it means to \"ship software\" has changed dramatically. Engineering teams have shifted to producing container images and using them to deploy their software. Because of this change, teams are now shipping significantly more software alongside their app – whether they realize it or not.\n\nBesides packaging an application, container images also include hundreds of binaries and libraries. These binaries and libraries are included in the container image produced by the team because the process of creating a container image requires teams to select a base image. A base image is a preexisting container image on which a team \"bases\" its own container image. In doing so, all software contained in the base image is inherited by the team's new image.\n\nThe shift to containers has a monumental impact on security. Now, anyone who deploys your team's container image could be deploying software with known vulnerabilities. Similarly, other teams that base their container images on your team's image will inherit any vulnerabilities present in your team's image.
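\n\nThe mechanics are visible in any Dockerfile – whatever the base image contains ships with the final image. A generic illustration (the image and paths are placeholders, not from a specific project):\n\n```dockerfile\n# Every package inside the base image becomes part of your image, too\nFROM debian:bullseye\n\n# Your application is only a small fraction of what actually ships\nCOPY ./myapp /usr/local/bin/myapp\nENTRYPOINT [\"/usr/local/bin/myapp\"]\n```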
\n\nIt's crucial that teams have a solution in place for detecting these vulnerabilities in the container images they're using.\n\n## Container Scanning with Grype\n\nFortunately, GitLab 14.0 offers a new way for teams to tackle this challenge: [Grype](https://github.com/anchore/grype). Anchore developed this state-of-the-art vulnerability scanner, which is now available as part of GitLab's Container Scanning feature.\n\nGrype is an advanced vulnerability scanner because it performs deep inspection of the software installed in a container image, and it uses this detailed information to produce better matches with vulnerability data.\n\nGrype is a particularly powerful tool for security-minded engineers to investigate and remediate findings because it gives comprehensive information in the vulnerability analysis, showing exactly how the tool determined vulnerability _X_ matched software package _Y_. Grype provides the transparency and detail necessary to investigate why any vulnerability is being reported for an image. Some examples of what Grype can identify include: the exact image layer and file path where a package is installed, the source of the vulnerability data, available patches, and which parameters of the vulnerability record matched attributes of the package, among other things.\n\n\"We are excited to embed these very robust container scanning features of Grype within the GitLab DevOps platform,\" says [Sam White](/company/team/#sam.white), senior product manager of Protect at GitLab. \"Our built-in security enables DevOps velocity with confidence and these added features bring even greater security for cloud native applications.\"\n\n## Get started with Grype and GitLab\n\nFollow these steps to set up GitLab's integration with Grype.\n\n### What you'll need:\n\n- [GitLab Ultimate](/pricing/ultimate/)\n- Access to an image in a container registry (such as the container registry in your GitLab project)\n- A CI/CD pipeline that meets all of the [requirements](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#requirements) for Container Scanning\n\n### How to start scanning with Grype\n\nTo get started, just add the following snippet to your project's `.gitlab-ci.yml` file:\n\n```yaml\ninclude:\n  - template: Security/Container-Scanning.gitlab-ci.yml\n\ncontainer_scanning:\n  variables:\n    CS_ANALYZER_IMAGE: registry.gitlab.com/security-products/container-scanning/grype:4\n```\n\nBy default, the Container Scanning analyzer makes some assumptions about your target container image's URL and tag. You can have the scanner analyze any container image you want — you just need to specify [additional variables](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#available-cicd-variables) in the `container_scanning` section of your `.gitlab-ci.yml` file. This set of variables also lets you configure registry credentials, custom CA certificates, whether to validate certificates, etc.
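\n\nFor instance, pointing the scanner at a specific image might look like the following sketch. The `DOCKER_IMAGE` variable name follows the linked CI/CD variables documentation at the time of writing, but treat it as an assumption and confirm it against the docs for your GitLab version; the image URL is a placeholder.\n\n```yaml\ncontainer_scanning:\n  variables:\n    CS_ANALYZER_IMAGE: registry.gitlab.com/security-products/container-scanning/grype:4\n    # Placeholder target image; by default the analyzer scans the commit's registry image\n    DOCKER_IMAGE: registry.example.com/mygroup/myapp:latest\n```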
\n\n## Viewing vulnerability analysis results\n\nOnce your first Container Scanning job completes, you can see what vulnerabilities have been reported. Just go to the \"Security & Compliance\" left-side menu and select \"Vulnerability Report\".\n\n![GitLab Security and Compliance Menu](https://about.gitlab.com/images/blogimages/anchore_blog_images/gitlab-security-menu.jpg){: .shadow}\nNavigate to \"Vulnerability report\" under the \"Security and Compliance\" menu.\n{: .note.text-center}\n\nFor example, here's what your vulnerability report could look like:\n\n![Sample vulnerability report](https://about.gitlab.com/images/blogimages/anchore_blog_images/gitlab-vulnerability-report.jpg){: .shadow}\nSee a sample Vulnerability Report\n{: .note.text-center}\n\nYou'll notice that the Vulnerability Report page gives you an immediate sense of the severities of the vulnerabilities. Even if there is a large number of vulnerabilities, you can quickly filter the list and dive deeper into any single vulnerability.\n\n## Final thoughts\n\nAdding Container Scanning with Grype to your GitLab pipeline is a straightforward process. With just a small snippet of YAML and some optional configuration, you can add tremendous visibility into the security of your team's container images.\n\nRead on to learn more about the [Container Scanning feature with GitLab](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html).\n\nLastly, make sure to check out the [Grype project](https://github.com/anchore/grype). We have an active open source community and make improvements all the time. If you have any questions or feature requests, don't hesitate to [open an issue](https://github.com/anchore/grype/issues/new/choose) or join our [community Slack](https://anchore.com/slack).",[894,1307,894],{"slug":3721,"featured":6,"template":678},"secure-container-images-with-gitlab-and-grype","content:en-us:blog:secure-container-images-with-gitlab-and-grype.yml","Secure Container Images With Gitlab And Grype","en-us/blog/secure-container-images-with-gitlab-and-grype.yml","en-us/blog/secure-container-images-with-gitlab-and-grype",{"_path":3727,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3728,"content":3734,"config":3740,"_id":3742,"_type":16,"title":3743,"_source":17,"_file":3744,"_stem":3745,"_extension":20},"/en-us/blog/how-to-agentless-gitops-vars",{"title":3729,"description":3730,"ogTitle":3729,"ogDescription":3730,"noIndex":6,"ogImage":3731,"ogUrl":3732,"ogSiteName":692,"ogType":693,"canonicalUrls":3732,"schema":3733},"Using push-based GitOps with GitLab scripts and variables","Learn how GitLab supports agentless approach for GitOps with scripting and variables.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682051/Blog/Hero%20Images/agentless-gitops-vars-cover-880x587.jpg","https://about.gitlab.com/blog/how-to-agentless-gitops-vars","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a push-based approach for GitOps with GitLab scripting and variables\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-07-23\",\n      }",{"title":3735,"description":3730,"authors":3736,"heroImage":3731,"date":3737,"body":3738,"category":14,"tags":3739},"How to use a push-based approach for GitOps with GitLab scripting and variables",[1101],"2021-07-23","\n\nIn [part one](/blog/how-to-use-agent-based-gitops/) of our GitOps series, we described how to use a pull-based (or agent-based) approach. In this second blog post, we'll dig deep into how to use a push-based approach. 
The agentless approach may be preferable for situations with non-Kubernetes infrastructure components or when you don't want to install, run, and maintain agents in each infrastructure component for [GitOps](/topics/gitops/). In this post, we will discuss how the scripting capabilities of GitLab can be used in GitOps workflows, and how to use predefined GitLab variables to shape infrastructure components.\n\n## About a push-based or agentless approach\n\nWith the agentless approach, infrastructure is expressed and managed as code on GitLab, and updates and drift detection are automated and handled by GitLab without having to install any agents on infrastructure components.\n\n## How to use scripting in your pipelines to shape infrastructure\n\nGitLab allows automation using scripting. Whether you're using Docker, Helm, Ansible, or even direct SSH commands, you can use the scripting capabilities of GitLab to create, shape, and modify infrastructure.\n\nIn the example below, the pipeline determines the shape of the infrastructure the application runs on by specifying a Docker image as well as running Docker commands to build and push an application to the GitLab built-in container registry.\n\n![Using Docker in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/0-docker-use-in-pipeline.png){: .shadow.small.center.wrap-text}\nHow to use Docker in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nThe infrastructure is shaped again at a later stage in the pipeline, but this time by using kubectl and Helm commands:\n\n![Using kubectl in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/1-helm-use-in-pipeline.png){: .shadow.medium.center.wrap-text}\nHow to use kubectl in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nDepending on the type of infrastructure, other technologies can be used to shape the infrastructure. In the next example, Ansible is used to run a playbook that sets up the infrastructure for an entire lab environment:\n\n![Using Ansible in your pipeline to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/2-ansible-use-in-pipeline.png){: .shadow.medium.center.wrap-text}\nHow to use Ansible in your pipeline to shape infrastructure.\n{: .note.text-center}\n\nThe scripting capabilities of GitLab pipelines combined with GitLab's CI/CD capabilities allow users to create GitOps flows to manage Infrastructure as Code (IaC), which delivers more resilient infrastructure and less risk of unscheduled downtime.\n\n## How to use Auto DevOps to modify infrastructure using variables\n\nGitLab also allows users to shape infrastructure by using project or group variables.
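\n\nWith Auto DevOps, for example, replica counts can be driven by a variable. A minimal sketch, using the `PRODUCTION_REPLICAS` variable from the Auto DevOps customization docs referenced below (the value is illustrative):\n\n```yaml\nvariables:\n  PRODUCTION_REPLICAS: \"4\"   # number of pods for the production environment\n```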
\n\nThe number of production pods in a Kubernetes cluster is updated to four in the example below:\n\n![Using variables to shape infrastructure](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/3-ado-modify-infra-via-vars.png){: .shadow.medium.center.wrap-text}\nHow to use variables to shape infrastructure.\n{: .note.text-center}\n\nThe number of production pods changes to four on the next execution of the pipeline:\n\n![Production pods increased via a variable update](https://about.gitlab.com/images/blogimages/how-to-agentless-gitops-vars/4-ado-modified-infra-via-vars.png){: .shadow.medium.center.wrap-text}\nProduction pods changed using a variable update.\n{: .note.text-center}\n\nThere are many GitLab [build and deployment variables](https://docs.gitlab.com/ee/topics/autodevops/customize.html#build-and-deployment) that can modify infrastructure. [PostgreSQL](https://www.postgresql.org/) is provisioned as an infrastructure component by default in GitLab to support applications that require a database, and GitLab also provides [these variables](https://docs.gitlab.com/ee/topics/autodevops/customize.html#database) to customize it.\n\n## How GitLab capabilities help agentless infrastructure\n\nThe scripting capabilities of GitLab are a convenient way to shape infrastructure components in GitOps workflows using a push-based approach. This method allows for the easy integration of IaC tools in your GitOps pipelines. If you are doing IaC and GitOps for non-Kubernetes infrastructure components, this is the best approach. GitLab also provides out-of-the-box variables, so users can modify selected infrastructure components. In the final part of this GitOps series, we will discuss an agentless approach using our integration to Terraform as well as examples of GitOps flows for AWS ECS and EC2.\n\nCover image by [Rod Long](https://unsplash.com/@rodlong?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/machu-picchu?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n\n## Read more on GitOps with GitLab: \n\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)\n\n- [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)\n\n- [GitOps viewed as part of the Ops evolution](/blog/gitops-as-the-evolution-of-operations/)\n\n- [GitOps with GitLab: Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)\n\n\n\n",[535,894,2932],{"slug":3741,"featured":6,"template":678},"how-to-agentless-gitops-vars","content:en-us:blog:how-to-agentless-gitops-vars.yml","How To Agentless Gitops Vars","en-us/blog/how-to-agentless-gitops-vars.yml","en-us/blog/how-to-agentless-gitops-vars",{"_path":3747,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3748,"content":3754,"config":3760,"_id":3762,"_type":16,"title":3763,"_source":17,"_file":3764,"_stem":3765,"_extension":20},"/en-us/blog/teams-gitpod-integration-gitlab-speed-up-development",{"title":3749,"description":3750,"ogTitle":3749,"ogDescription":3750,"noIndex":6,"ogImage":3751,"ogUrl":3752,"ogSiteName":692,"ogType":693,"canonicalUrls":3752,"schema":3753},"Teams speed up development with GitLab's Gitpod integration","Learn about Gitpod as a cloud development environment, and how its integration into GitLab helps teams get more efficient in their DevOps 
lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667482/Blog/Hero%20Images/cover-image-unsplash.jpg","https://about.gitlab.com/blog/teams-gitpod-integration-gitlab-speed-up-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How teams can use the Gitpod integration in GitLab to speed up their development process\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-07-19\",\n      }",{"title":3755,"description":3750,"authors":3756,"heroImage":3751,"date":3757,"body":3758,"category":14,"tags":3759},"How teams can use the Gitpod integration in GitLab to speed up their development process",[1504],"2021-07-19","\n\nTurn back time a bit and try to remember the first project you started or joined, and the onboarding experience. How long did it take to install the development environment on your local machine?\n\nWe talked about our own onboarding experiences into software development, and thought about sharing our favorite tips with GitLab users.\n\n## A developer's tale\n\nEveryone starts fresh, and often best practices are just \"learning by doing,\" requiring documentation in the same moment. Programming languages and application architectures are also different - a C++ backend environment has different requirements than a Ruby on Rails web application.\n\nStart with defining the requirements and stages. Oftentimes they are equivalent to CI/CD pipeline stages but executed in your own environment.\n\n* Compile/build the application and verify that the source code is valid (\"build\")\n* Run linting, unit tests, code quality checks (\"test\")\n* Run the application in a dev environment (\"runtime test\")\n* Package the application, run installation tests (\"staging installation\")\n* Run the installed application (\"staging deployment\")\n* Tag, release, and deploy the application (\"release production deployment\")\n\nYou want to run the application in a development environment quickly, everything else with staging and deployments continues to run in your CI/CD pipelines. Their implementation and availability should be on your to-do list.\n\nSoftware applications can depend on existing libraries which are used by many other developers, and help speed up the development process. These dependencies need to be installed into the development environment - if that is your local macOS, Windows or Linux desktop, methods and requirements will differ.\n\n### Provision development environments\n\nCreating a development environment for many different operating systems has its disadvantages: Error messages can differ and implementation specific details do not produce the same results and require back-and-forth communication on the team. This often leads to friction and slowed down development processes.\n\nOne key learning over the past decade has been to use CI/CD extensively to test different environments and operating systems, and rely on fast feedback in Merge Requests. Developers should be able to focus on their development environment without having to worry about the many production use cases and support.\n\nVirtual machines in Vagrant, and Docker containers made the generic development environment creation easier and efficient. The documentation instructed everyone to either execute `vagrant up` or `docker-compose up -d` and have the development stack ready. 
\n\nThe road to creating Vagrant and Docker base images, including the provisioning scripts with Bash, Ansible, Puppet, etc., was and still is a huge learning process. Opinions on \"good\" best practices differ, and adding your preferred IDE on top of a CLI-only VM or container is often an adventure of its own.\n\nBandwidth and traffic can also come into play - each provision and software installation run may consume gigabytes of data. If the workloads and provisioning run in the cloud, your local connection is not affected.\n\nOne customer mentioned a while ago that their company policy forbids installing a local IDE without a license. The Web IDE in GitLab solves this problem for them throughout the onboarding month.\n\n### Development environment in the browser\n\nThe Web IDE helps with basic programming tasks, editing the documentation or setting up the CI/CD configuration. It does not provide a fully fledged server runtime, as a cloud IDE with a programming environment capable of understanding the language you are programming in would. Our vision is to explore ways to [add integrated development environments into the Web IDE](/handbook/engineering/incubation/server-runtime/).\n\nThere are a variety of tools and environments following remote collaboration ideas and the cloud IDE approach. You can learn more in [this Twitter thread](https://twitter.com/sytses/status/1400134840754733059) from [GitLab co-founder and CEO, Sid Sijbrandij](/company/team/#sytses). One approach is [Gitpod](https://gitpod.io/), allowing you to spin up a fresh environment in the cloud in seconds.\n\nGitpod uses Visual Studio Code (VS Code) as its cloud IDE, and integrates with their marketplace to install the same extensions as you would install locally in VS Code. One of the coolest things about Gitpod is that it not only spins up a fresh environment, but also allows you to install additional software or bring your own workspace container image. That way everyone uses the same pre-provisioned environment, and pair programming and debugging become a breeze.\n\nNext time, the same state is booted up, secured by single sign-on.\n\n## First steps with Gitpod\n\nNavigate to [gitpod.io](https://gitpod.io) and choose `continue with GitLab` to log in.\n\nIf you are running a self-managed GitLab setup, ask your administrator to [enable the Gitpod integration](https://docs.gitlab.com/ee/integration/gitpod.html).\n\nLet's start with creating a VueJS application. Fork the [learn-vuejs-gitpod](https://gitlab.com/gitlab-de/playground/learn-vuejs-gitpod) project on GitLab.com.\n\n### Alternative: Start on your CLI\n\nAs an alternative to forking the project, install NodeJS, npm and the `vue-cli` package, and run `vue create learn-vuejs-gitpod`. The `vue` command already initializes a Git repository and commits based on your local Git configuration. 
Add the remote origin and push to a new repository on the remote GitLab server.\n\n```shell\n$ brew install node\n$ yarn global add @vue/cli\n$ vue create learn-vuejs-gitpod\n\n$ cd learn-vuejs-gitpod\n$ git remote add origin https://gitlab.com/\u003Cyourusername>/learn-vuejs-gitpod.git\n$ git push -u origin main\n```\n\nGitLab will [create a private project from the git push command](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-new-project-with-git-push).\n\n### Start Gitpod\n\nStart Gitpod from the repository overview by selecting the dropdown switch next to the Web IDE button.\n\n![Gitpod VueJS Start](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_gitlab_start_vuejs.png)\n\nSign in to your GitLab account with SSO when asked. Accept the required permissions, and wait until the Gitpod environment is booted up.\n\n![Gitpod VueJS Overview](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_vuejs_overview.png)\n\nChange to the terminal and run yarn to install the dependencies and start the development server. No worries, we'll show you how to automate this in a second!\n\n```shell\nyarn install\nyarn serve\n```\n\nGitpod detects the server listening on port 8080 and offers to make it public. Open the browser instead - it works but says `Invalid host header` because the dev server checks the host name. For running inside Gitpod containers, you need to [disable the host checks](https://github.com/gitpod-io/gitpod/issues/26#issuecomment-554058232).\n\nLet's fix this inside Gitpod in the project. Navigate into the left file tree, and add a new file called `vue.config.js` in the top level.\n\n![Gitpod VueJS Overview](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_vuejs_config_disable_host_checks_devserver.png)\n\nCopy the following code snippet into it:\n\n```js\n// vue.config.js\nmodule.exports = {\n    // Rationale: https://github.com/gitpod-io/gitpod/issues/26#issuecomment-554058232\n    devServer: {\n        disableHostCheck: true\n    }\n}\n```\n\nThen stop the running `yarn serve` command in the terminal by pressing `ctrl+c`. Press `cursor up` to select the previous command, or type `!!` to repeat the last command, followed by `enter` to start the devserver again. Voilà!\n\n![VueJs running app in Gitpod](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_vuejs_web_app.png)\n\nDon't forget to add and commit the new configuration file to persist the changes. Navigate into the `Source Control` section, which highlights one pending change. Enter a commit message, click the check mark and approve all pending changes into the commit.\n\n![Gitpod Source Control](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_source_control_add_vuejs_config.png)\n\nSelect the `...` menu to `push` the Git history. Gitpod will ask you for `repository read/write` permissions; walk through the forms and edit them on Gitpod itself. Navigate back to the Gitpod project interface and re-do the push.\n\nFrom this first success, it is not far to your first customized VueJS application. But wait, there is more to learn about Gitpod and efficient workflows!\n\n### VS Code Extensions\n\nNavigate into the `Extensions` menu and search for `gitlab workflow`. Install the extension. 
We recommend installing it globally for your account and all future workspaces.\n\n![Gitpod extension: GitLab workflow for VS Code](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_extension_gitlab_workflow.png)\n\nNext, navigate into the new GitLab menu item on the left, and configure the extension. It needs a personal access token, similar to the process with a local VS Code extension configuration. Follow the steps in the [GitLab documentation to create a personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token).\n\n![Gitpod: GitLab workflow extension config](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_gitlab_workflow_extension_config.png)\n\n## Speed up your own projects\n\nUsing Gitpod and GitLab to develop GitLab makes it easy to contribute, but what about your own DevOps lifecycle and projects? Below are a few more examples to speed up your development with Gitpod and GitLab.\n\nRemember: You can start Gitpod without any configuration, directly from a GitLab repository. If additional settings are needed, you can develop them while learning from the examples and documentation best practices.\n\n### Hugo Pages website live review\n\nYou can use Hugo with GitLab Pages to host your own private blog, for example. Hugo is a static site generator written in Go, with public Docker images already available. The deployment of [everyonecancontribute.com](https://everyonecancontribute.com/) uses the following jobs in its [.gitlab-ci.yml](https://gitlab.com/everyonecancontribute/web/everyonecancontribute.gitlab.io/-/blob/main/.gitlab-ci.yml) configuration:\n\n```yaml\n.publish: &publish\n  image: registry.gitlab.com/pages/hugo:latest\n  script:\n    - hugo\n  artifacts:\n    paths:\n    - public\n\npages:\n  stage: publish\n  \u003C\u003C: *publish\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n      when: always\n  environment:\n    name: $CI_PROJECT_NAME\n    url: https://$CI_PROJECT_NAME/\n```\n\nA local development environment to preview the website needs the Hugo binary installed. Want to do the same in the browser - run the Hugo CLI command and preview the blog post? We've found a way to provision Gitpod accordingly, using [this .gitpod.yml configuration](https://gitlab.com/everyonecancontribute/web/everyonecancontribute.gitlab.io/-/blob/main/.gitpod.yml):\n\n```yaml\nimage: klakegg/hugo:debian\n\nports:\n  - port: 1313\n\ntasks:\n  - command: hugo server -D -b $(gp url 1313) --appendPort=false\n```\n\nThe Hugo container image gets pulled and the Gitpod workspace builder prepares the environment. Note that [Alpine-based images do not work](https://github.com/gitpod-io/gitpod/issues/3356#issuecomment-877604994); use Debian variants instead. After starting the workspace, the tasks run the command and expose a port. The port binding needs to be the external URL of the pod, not localhost: `gp url 1313` builds the exact URL and binds the Hugo server to it, making the pod URL publicly accessible for reviews.
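\n\nAs a small variation - a minimal sketch, assuming Gitpod's documented `onOpen` port behaviors (the `open-preview` setting is not part of the original setup) - you can open the rendered site in the IDE's preview pane instead of a new browser tab:\n\n```yaml\nimage: klakegg/hugo:debian\n\nports:\n  # Open the workspace's built-in preview pane instead of a new tab.\n  - port: 1313\n    onOpen: open-preview\n\ntasks:\n  # gp url 1313 resolves the external workspace URL for port 1313.\n  - command: hugo server -D -b $(gp url 1313) --appendPort=false\n```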
\n\n![Gitpod: Hugo website](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_hugo_everyonecancontribute_com.png)\n\nFrom there, you can switch branches in Gitpod, and immediately verify the changes.\n\n### VueJS with custom container image\n\nGetting started with VueJS in a new project with the `vue-cli` package is very convenient, and the Gitpod docs have a [guide](https://www.gitpod.io/docs/languages/vue/#vue-cli) ready. The default `gitpod/workspace-full` image does not provide the `vue-cli` package. You can extend the container image by using your [custom .gitpod.Dockerfile](https://www.gitpod.io/docs/config-docker#configure-a-custom-dockerfile) - Gitpod takes care of building the image first, and later starts the workspace based on it.\n\n```dockerfile\nFROM gitpod/workspace-full\n\nRUN yarn global add @vue/cli\n```\n\nThe `.gitpod.yml` configuration file needs to instruct Gitpod to build and use the custom image. On startup, the `tasks` section runs the initial dependency installation, and starts the development environment with `yarn serve`. The server listens on port 5000 by default; this is what gets [exposed](https://www.gitpod.io/docs/config-ports) and instructed to open as a call-to-action in the browser.\n\n```yaml\nimage:\n  file: .gitpod.Dockerfile\n\ntasks:\n  - init: yarn install\n    command: yarn serve\n\nports:\n  - port: 5000\n    onOpen: open-browser\n```\n\nYou can combine Gitpod for previewing the website with the production deployment using the [five minute production app deployment template](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template) shown in [this project](https://gitlab.com/gitlab-de/playground/5-min-prod-app-vuejs). GitLab takes care of provisioning a free AWS EC2 instance, TLS certificates and domain handling.\n\n### More Gitpod workspace images\n\nGitpod provides many [ready-to-use workspace images](https://github.com/gitpod-io/workspace-images). In order to use them, create the `.gitpod.yml` file with this content:\n\n```yaml\nimage:\n  file: .gitpod.Dockerfile\n```\n\nCreate a new `.gitpod.Dockerfile` file and add the `FROM` instruction for the desired workspace image.\n\n```dockerfile\nFROM gitpod/workspace-mysql\n```\n\nIf you need to install additional software, note that the full workspace image is Debian-based and therefore you'll need to use the `apt` package manager. The following command updates the package index, and clears the cache after installation to keep the image clean.\n\n```dockerfile\nRUN sudo apt update && sudo apt install -y PACKAGENAME && sudo rm -rf /var/lib/apt/lists/*\n```\n\nIf you are not sure about the package name, run Docker locally and search for it. 
Fair warning: the `gitpod/workspace-full` image is huge; for a quick package search, use the much smaller `debian:latest` base image instead.\n\n```shell\n$ docker run -ti debian:latest bash\n$ apt update\n$ apt search POSSIBLENAME\n```\n\nYou can explore the [workspace image repository](https://github.com/gitpod-io/workspace-images) to learn more about the Dockerfile configuration used by the builder.\n\n## Do more with Gitpod\n\n### Merge request code reviews\n\nThe GitLab workflow extension comes with more superpowers:\n\n* Access the project and Merge Requests\n* Check the CI/CD pipeline status directly in Gitpod\n* Perform MR code reviews in Gitpod and take advantage of [VS Code workflows](/blog/mr-reviews-with-vs-code/)\n\n![Gitpod: MR Code Reviews with the GitLab Workflow extension website](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_vs_code_gitlab_workflow_extension_mr_code_reviews.png)\n\n### Pre-install VS Code Extensions\n\nIn order to ensure specific [VS Code extensions](https://www.gitpod.io/docs/vscode-extensions/) are installed, you can define them in the `.gitpod.yml` configuration file in the repository. Example from the [GitLab project](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitpod.yml#L79):\n\n```yaml\nvscode:\n  extensions:\n    - rebornix.ruby@0.28.0\n    - wingrunr21.vscode-ruby@0.27.0\n    - karunamurti.haml@1.3.1\n    - octref.vetur@0.34.1\n    - dbaeumer.vscode-eslint@2.1.8\n    - gitlab.gitlab-workflow@3.24.0\n```\n\n### Learn new programming languages: Rust\n\nGitpod allows you to start a fresh pod environment, pause on idle, and continue at a later point. The default workspace environment image already includes the [Rust compiler](https://www.gitpod.io/docs/languages/rust), which means that you can immediately [start learning Rust](https://doc.rust-lang.org/rust-by-example/).\n\nCreate a new project called `learn-rust` and open Gitpod from the repository view. Add a new file in the left tree view called `hello.rs` and add the following content:\n\n```rust\nfn main() {\n\tprintln!(\"Hello from GitLab! 🦊\");\n}\n```\n\nChange into the terminal and run the following commands to compile and run the program:\n\n```shell\n$ rustc hello.rs\n$ ./hello\n```\n\nWe started learning Rust together in an [#EveryoneCanContribute cafe](https://everyonecancontribute.com/post/2020-10-07-cafe-3-gitpod-gitlab-rust/) in October 2020, including [workshop slides with exercises](https://docs.google.com/presentation/d/1t1FdHh04TAOg9WITqRFJHz1YFxMbsQeekN8th1UfFcI/edit). In June 2021, we continued with [Rocket.rs](https://everyonecancontribute.com/post/2021-06-30-cafe-36-rust-rocket-prometheus/) as a web app, adding Prometheus monitoring metrics. You can watch the recordings to follow the learning process, the mistakes we made along the way, and the first successes.\n\n### How to contribute to GitLab with Gitpod\n\nA more complex development environment is GitLab itself. The [architecture](https://docs.gitlab.com/ee/development/architecture.html) involves many different components, and the development environment requires you to install several dependencies for Ruby, NodeJS and Go, as well as backend applications. The GitLab Development Kit (GDK) describes the steps in detail - to get everything up and running, plan for a 30-minute to three-hour process, depending on compute power and bandwidth.\n\nEarly in the process of adopting Gitpod for GitLab team members, the groundwork with the base image and bootstrap script took the majority of the preparation time. 
You can learn more about the integration process in [this issue](https://gitlab.com/gitlab-org/gitlab-development-kit/-/issues/1076).\n\n> It's already possible to try out how the setup works by opening Gitpod, which after waiting for the setup to finish (six to eight minutes) will bring you the Gitpod UI with the GDK fully running and ready for you to make changes and commit. As soon as that setup is finished, you can switch to whatever branch you want, either from the Gitpod UI or via the terminal.\n\nThe [GDK documentation for Gitpod](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/gitpod.md) guides you through the required steps. **Important**: You need to start Gitpod from the [gitlab-org/gitlab](https://gitlab.com/gitlab-org/gitlab/) project (as a team member; as a contributor, please fork the repository first). Additional features, such as a local GitLab runner, feature flags, Advanced Search, etc., must be [enabled manually](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/gitpod.md#configure-additional-features).\n\n![GitLab Development Kit running in Gitpod](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_gitlab_gdk_running.png)\n\n### Everyone can contribute\n\nReady? Start contributing to your favorite OSS project, and connect with your teams for an all-remote pair programming session using Gitpod! :-)\n\nCover image by [Thomas Lipke](https://unsplash.com/photos/oIuDXlOJSiE) on [Unsplash](https://unsplash.com)\n{: .note}\n",[232,1347,727],{"slug":3761,"featured":6,"template":678},"teams-gitpod-integration-gitlab-speed-up-development","content:en-us:blog:teams-gitpod-integration-gitlab-speed-up-development.yml","Teams Gitpod Integration Gitlab Speed Up Development","en-us/blog/teams-gitpod-integration-gitlab-speed-up-development.yml","en-us/blog/teams-gitpod-integration-gitlab-speed-up-development",{"_path":3767,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3768,"content":3774,"config":3779,"_id":3781,"_type":16,"title":3782,"_source":17,"_file":3783,"_stem":3784,"_extension":20},"/en-us/blog/gitops-as-the-evolution-of-operations",{"title":3769,"description":3770,"ogTitle":3769,"ogDescription":3770,"noIndex":6,"ogImage":3771,"ogUrl":3772,"ogSiteName":692,"ogType":693,"canonicalUrls":3772,"schema":3773},"GitOps viewed as part of the Ops evolution","Examine the evolution that led to GitOps","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682062/Blog/Hero%20Images/food-train.jpg","https://about.gitlab.com/blog/gitops-as-the-evolution-of-operations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps viewed as part of the Ops evolution\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2021-07-12\",\n      }",{"title":3769,"description":3770,"authors":3775,"heroImage":3771,"date":3776,"body":3777,"category":14,"tags":3778},[2014],"2021-07-12","\n\nGitOps is a hot topic in the world of operations, but what does it provide to workflows that we didn’t have already? Looking at the evolution of the operations space, there have been many big changes in the past 20 years, and I argue that [GitOps](/topics/gitops/) is not a change but rather a summary of best practices. So, how do we describe the major phenomena in Ops over the past 20 years? 
\n \nBefore 2000, the primary approach to operations was to hire a System Administrator or empower Lead Developers to do Ops work. System Administrators knew a lot about networking and server optimisations, and a good sysadmin could do most of their work through code, using Bash, Perl or Python scripts. While every software engineer likely knows at least the basics of shell scripting, even many backend engineers would not be comfortable with the level of Bash scripting needed in traditional IT. \n \nBesides Bash, there were situations where the infrastructure was managed through graphical user interfaces. Most enterprise IT software shipped with some level of graphical UI. This area was particularly alien to software developers. I first worked as a system administrator at a student house in Hungary. We used Novell tools to manage our network, create backups, and set up workstations. To be successful, I had to learn a lot about the tools and the domain, while my programming skills were pretty minimal.\n \nToday, a cloud-native \"system administrator\" does their job primarily through API calls. The APIs are triggered through some infrastructure-as-code approach. Thus, even the sysadmins of today require much more advanced coding skills than they needed 20 years ago. Moreover, codifying your infrastructure enables battle-tested software development best practices, like testing, to be introduced in operations, too. \n\nThis is a huge change compared to where we were 20 years ago. What has changed to get us to where we are now, and how does it relate to GitOps?\n\n## The story\n \n\u003Ciframe src='https://cdn.knightlab.com/libs/timeline3/latest/embed/index.html?source=1_ZqRL3FjiRWlwW0Nx6imkrDcCbQtiFV4tJvR1JLiy3s&font=Default&lang=en&initial_zoom=2&height=650' width='100%' height='650' webkitallowfullscreen mozallowfullscreen allowfullscreen frameborder='0'>\u003C/iframe>\n \n### The first signals at Google\n\nThe System Administrator era is the initial period where our story starts. As we move forward, the first milestone is in 2003. For our story, two notable events happened during 2003. First, [Google presented Borg](https://research.google/pubs/pub43438/), their internal container management system that later became [Kubernetes](/blog/gitlab-kubernetes-agent-on-gitlab-com/). Second, Google hired Benjamin Treynor, and the SRE approach started with his work. Let's stop here for a minute to speak about the core aspects of the SRE approach!\n\n[Site Reliability Engineering (SRE)](https://sre.google/) is a software engineering approach to IT operations. Software engineers write software to reach a goal; there is likely a process around delivering the software that includes code reviews and tests, and there are success metrics attached to the delivered output. These success metrics in the context of SRE are called Service Level Indicators, and there are related Service Level Objectives and Service Level Agreements. By applying software engineering practices to operations, the reliability and scalability of the system can be better understood and improved. Moreover, the automations that emerge from the approach enable the development teams to be more efficient, as they can often self-serve their requirements.\n \n### The public cloud\n\nLet’s continue our story. For many companies around the world, an important development was Amazon Web Services (AWS). AWS launched in 2006 with three services: S3, SQS and EC2. 
Together, these services enabled companies to switch to AWS or to start their business on [AWS infrastructure](/blog/deploy-aws/). Amazon's market share has made it the leading cloud provider today, and their name is coupled tightly with public clouds. As more workloads migrated to the cloud, the way of operations had to adapt. \n \nIn past years, I've run many interviews with IT operations professionals and asked them about their [infrastructure as code (IaC) practices](/topics/gitops/infrastructure-as-code/). From these interviews, a very strong pattern emerged around IaC adoption. Companies usually switch to IaC as they move their infrastructure to the cloud. Simply put, managing dozens of cloud services through a UI is very problematic, and managing them through a single codebase is much more convenient. Together with the move to the cloud, there is a strong push to improve operations practices, and move towards more automated approaches.\n \n### The appearance of DevOps\n\nWhile the struggles of software delivery were well-known by 2009, the SRE approach pioneered at Google was not as widely adopted. As agile started to be formalized in 2000, it seemed that we had found a solution for building software, while the problem of delivering the built services in front of the user became more and more pressing. As a result of many discussions around this topic, Patrick Debois coined the term DevOps in 2009. \n \n> DevOps describes the cultural changes required in order to enable high-quality service delivery. The core idea of DevOps is to create a well-oiled process around service delivery by setting shared goals and clear ownership. The many approaches to DevOps are highlighted by [the 9 types presented as DevOps team topologies](https://web.devopstopologies.com/).\n \nJust like many agile techniques existed before agile was formalized, the SRE approach existed before the term DevOps came to be, and it can be considered an implementation of DevOps. There are just as many agile techniques as there are ways to implement DevOps. \n \n### Containers to drive the process\n\nIn 2013, several notable developments took place. O'Reilly published the first book on DevOps, and the operations space got a new tool - Docker - which led the way to containerisation and changed our industry tremendously. Containerisation provides a standard way to ship software. Previously, engineers could build a Debian package or a Java jar file. Basically, every technology had its own packaging solution, and there are _many_ technologies. Containers provide a single, standard way to package an application, enabling developers to own what happens inside the container and infrastructure teams to support developers in shipping containers reliably and quickly into production.\n \nThe idea of containerisation solves another problem, that of stale resources. For a long time, operations had to start different servers for various workloads and take care of the workloads' dependencies, which led to stale servers and huge inefficiencies - we did not have a good model for orchestrating the workloads. Apache Mesos was presented in 2009 and Docker Swarm in 2014, indicating innovation in this space. In 2014, Kubernetes was presented as the open source version of Google's Borg system, and it quickly became the leading solution in this area. When released, it already supported Docker containers, provided declarative infrastructure management through the Kube API, and came with a reconciliation loop at its core. Basically, the end user describes the expected state and sends it to the system, and Kubernetes tries to reach and maintain that state. Using an API for cloud operations was not new any more; still, describing what we want to see, instead of imperatively commanding the system to take specific actions, was a novel approach. Moreover, this enables the system to self-heal, as it can always aim at reaching the desired state. Besides better resource utilisation, these are the core values of container orchestrators.
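\n\nTo make the declarative model concrete, here is a minimal sketch of a Kubernetes manifest (names and image tag are illustrative, not from the original article): the user declares three nginx replicas, and the reconciliation loop continuously works to keep the cluster in exactly that state.\n\n```yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: hello-web\nspec:\n  # Desired state: Kubernetes starts or removes pods until three replicas run.\n  replicas: 3\n  selector:\n    matchLabels:\n      app: hello-web\n  template:\n    metadata:\n      labels:\n        app: hello-web\n    spec:\n      containers:\n        - name: nginx\n          image: nginx:1.21\n```\n\nIf a pod crashes or a node disappears, the control loop notices the drift from the declared state and recreates the pod - no imperative commands needed.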
\n\n### The summary is GitOps\n\nOur story slowly gets to its end in 2017, when the GitOps term was coined. GitOps provides a summary of what we already had, without adding anything new to the picture. Even though the practices were known, this workflow did not have a name yet. The cultural changes required for modern IT operations are described by DevOps and shown in the SRE approach. Automation has been with us since the advent of continuous integration, and new tools like AWS, containers, and Kubernetes enabled it in operations too. Finally, Kubernetes provides a way for the system to take care of itself (more or less), and provides a self-healing aspect of automation. As Gene Kim wrote in the _Phoenix Project_, “The Second Way is about creating the right to left feedback loops”. Coupling this with storing all the code that describes our system in a versioned manner, applying it automatically through a well-defined process, and finally using a self-healing system is what we call GitOps. \n\n## What does it mean to you\n \nAt GitLab, our [vision](https://about.gitlab.com/direction/#vision) is to provide a single application for the whole DevSecOps lifecycle. As part of this, GitLab offers one of the leading CI automation tools, and our dedicated [Infrastructure as Code](https://docs.gitlab.com/ee/user/infrastructure) and [Kubernetes Management](https://docs.gitlab.com/ee/user/project/clusters/) features enable best-practice operations for modern ops teams. We understand that many services run on legacy infrastructure, where automation is very problematic, and some companies do not have the resources or the need to move to Kubernetes. As shown above, the canonical definition of GitOps is not feasible in these situations. 
Thankfully, the value of GitOps itself is minor compared to the value of a strong DevOps culture combined with the automation enabled by the target systems.\n \nAs a result, I encourage everyone to approach GitOps by understanding their current level of DevOps practices, as GitOps will emerge naturally from following well-known practices in the DevOps area.\n \nCover image by [Sigmund](https://unsplash.com/@sigmund?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/evolution?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n## Read more on GitOps with GitLab: \n\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)\n\n- [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)\n\n- [How to use a push-based approach for GitOps with GitLab scripting and variables](/blog/how-to-agentless-gitops-vars/)\n\n- [GitOps with GitLab: Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)\n\n",[894,535],{"slug":3780,"featured":6,"template":678},"gitops-as-the-evolution-of-operations","content:en-us:blog:gitops-as-the-evolution-of-operations.yml","Gitops As The Evolution Of Operations","en-us/blog/gitops-as-the-evolution-of-operations.yml","en-us/blog/gitops-as-the-evolution-of-operations",{"_path":3786,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3787,"content":3793,"config":3798,"_id":3800,"_type":16,"title":3801,"_source":17,"_file":3802,"_stem":3803,"_extension":20},"/en-us/blog/how-to-stand-up-gitlab-in-awsmp",{"title":3788,"description":3789,"ogTitle":3788,"ogDescription":3789,"noIndex":6,"ogImage":3790,"ogUrl":3791,"ogSiteName":692,"ogType":693,"canonicalUrls":3791,"schema":3792},"How to stand-up a GitLab instance in AWS Marketplace","This is a quick guide to help you provision a GitLab instance in the AWS Marketplace and set up a Runner.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682043/Blog/Hero%20Images/awsmp.png","https://about.gitlab.com/blog/how-to-stand-up-gitlab-in-awsmp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to stand-up a GitLab instance in AWS Marketplace\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2021-06-30\",\n      }",{"title":3788,"description":3789,"authors":3794,"heroImage":3790,"date":3795,"body":3796,"category":14,"tags":3797},[1019],"2021-06-30","\n\n## In this guide, we will learn how to spin up GitLab in the AWS Marketplace:\n\n### Prerequisites for this lab are an AWS account and an accessible, working VPC.\n\n### We will learn the following steps:\n\n1. Stand up a self-managed instance of GitLab.\n2. 
Install GitLab Runner and the Docker Engine.\n\n\n## Step-by-step Instructions\n\n\n### Step 1 - Stand up a GitLab instance in AWS\n\n\n- Open [GitLab Ultimate](https://aws.amazon.com/marketplace/pp/B07SJ817DX) in the AWS Marketplace.\n- Click on **Continue to Subscribe**.\n\n![aws-1](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-1.png)\n\n- Sign in with your IAM user.\n\n![aws-2](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-2.png)\n\n- Click on **Continue to Configuration**.\n\n![aws-3](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-3.png)\n\n- Leave the default value for **Delivery Method**, select the latest version in **Software Version**, select your **Region**, and click **Continue to Launch**.\n\n![aws-4](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-4.png)\n\n- On the **Launch this software** page, scroll down.\n\n![aws-5](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-5.png)\n\n- Under **Security Group Settings**, click **Create New Based On Seller Settings**.\n\n![aws-6](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-6.png)\n\n- Name your security group, add a description, and save it.\n\n![aws-7](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-7.png)\n\n- Select a **Key Pair**. If you don't have a key pair, create one. Leave the other fields on this page at their default values. Click **Launch**.\n\n![aws-8](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-8.png)\n\n- You will get a congratulations message confirming that you launched the machine successfully. In this message, click on the **EC2 Console** link.\n\n![aws-9](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-9.png)\n\n- Click on your instance ID link.\n\n![aws-10](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-10.png)\n\nThe provisioning takes a few minutes. Please wait before you start the next step.\n\n- Click **Open address** in order to open the GitLab UI. Copy the **private** or **public** IP to your browser, depending on your **VPC configuration**.\n\n\n![aws-10_5](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-10_5.png)\n\n- It takes a few minutes to start the server. You may see this error; this is OK - wait a minute and refresh the page.\n\n![aws-11](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-11.png)\n\n- You should now be able to access the GitLab login page. The username is **root** and the password is your **instance ID**. Click **Sign in**.\n\n![aws-13](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-13.png)\n\n## Congratulations! You managed to start a GitLab instance and sign in to it.\n\n![aws-14](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/aws-14.png)\n\n\n\n\n### Step 2 - Install Runner and Docker Engine\n\n\nRunner machines are the build agents that run the CI/CD jobs.\n\nRequirements:\n\n- Jobs run inside Docker images; therefore, the runner machine requires the Docker engine.\n\n\n### Connect to the machine with the **AWS console - Connect**\n\nIn order to set up the runner and the Docker engine, we need to connect to the GitLab machine we are running. This can be done via **SSH** from any command line, or directly via the **AWS Console**, depending on how your **VPC** is set up. 
In our example, we will use the **AWS console - Connect** feature to SSH into the machine.\n\n**WARNING: For security and performance reasons, it is not a recommended best practice to install runners on the same machine where the GitLab server is installed. Purely for the sake of simplicity, in this blog we will install the runner on the same machine.**\n\n  - Go to your Instance summary, and click **Connect** in order to open the console.\n\n  ![runner-1](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-1.png)\n\n  - Click **Connect** again.\n\n  ![runner-2](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-2.png)\n\n\n### Install the Docker engine\n\n  - Install the Docker engine by running these commands: `curl -fsSL https://get.docker.com -o get-docker.sh` and `sudo sh get-docker.sh`\n\n\n### Set up Runners\n\n  - Download the binaries for Linux x86: `sudo curl -L --output /usr/local/bin/gitlab-runner \"https://gitlab-runner-downloads.s3.amazonaws.com/latest/binaries/gitlab-runner-linux-386\"`\n  - Give it permissions to execute: `sudo chmod +x /usr/local/bin/gitlab-runner`\n  - Create a GitLab CI user: `sudo useradd --comment 'GitLab Runner' --create-home gitlab-runner --shell /bin/bash`\n  - Install and run it as a service: `sudo gitlab-runner install --user=gitlab-runner --working-directory=/home/gitlab-runner` followed by `sudo gitlab-runner start`\n\n\n### Register the Runner\n\n\n  - Run this command: `sudo gitlab-runner register`.\n  - You will be prompted to enter a URL.\n  - Open your GitLab instance and find the URL under the CI/CD settings:\n    - Click **Settings > CI/CD**.\n\n      ![runner-2](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-3.png)\n\n    - Expand **Runners**.\n\n      ![runner-4](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-4.png)\n\n    - Copy the URL to the clipboard under **specific runners**.\n\n    ![runner-5](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-5.png)\n\n  - Paste the URL in the console.\n  - Press **Enter**.\n  - You will be prompted to enter a registration token; copy it from the Runner settings.\n\n![runner-5](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-6.png)\n\n  - Paste it in the console.\n  - Enter a description for the runner: type **GitLab workshop**.\n  - Add a tag to this runner, for example, type **Linux**.\n  - Enter the executor: type **docker**.\n  - Enter the default Docker image: type **ruby:2.6**.\n  - You will get a message starting with **Runner registered successfully. Feel free to start it...**\n  - Refresh the Runner settings page in GitLab and you will see your runner under **Available specific runners**.\n  - Click **Edit**.\n\n  ![runner-7.png](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-7.png)\n\n  - Check the **Indicates whether this runner can pick jobs without tags** option, and click **Save changes**.\n\n  ![runner-7.png](https://about.gitlab.com/images/blogimages/2021-aws-marketplace-blog/runner-8.png)\n\n\n## Well done!! You installed and registered GitLab Runner successfully. Now you are ready to create a project and run your first CI/CD pipeline.
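\n\nTo quickly verify that the new runner picks up jobs, a minimal `.gitlab-ci.yml` along these lines is enough (the job name is illustrative); it runs a single job in the default **ruby:2.6** image configured above:\n\n```yaml\n# .gitlab-ci.yml - a smoke test for the newly registered runner\nsmoke-test:\n  script:\n    - ruby --version\n    - echo \"Hello from the GitLab workshop runner\"\n```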
\n\nIn my next blog, I will show you how to create a project, configure CI/CD, change your application code, and run a CI/CD pipeline.\n",[1347,894],{"slug":3799,"featured":6,"template":678},"how-to-stand-up-gitlab-in-awsmp","content:en-us:blog:how-to-stand-up-gitlab-in-awsmp.yml","How To Stand Up Gitlab In Awsmp","en-us/blog/how-to-stand-up-gitlab-in-awsmp.yml","en-us/blog/how-to-stand-up-gitlab-in-awsmp",{"_path":3805,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3806,"content":3812,"config":3817,"_id":3819,"_type":16,"title":3820,"_source":17,"_file":3821,"_stem":3822,"_extension":20},"/en-us/blog/how-to-use-agent-based-gitops",{"title":3807,"description":3808,"ogTitle":3807,"ogDescription":3808,"noIndex":6,"ogImage":3809,"ogUrl":3810,"ogSiteName":692,"ogType":693,"canonicalUrls":3810,"schema":3811},"How to use a pull-based (agent-based) approach for GitOps","Learn how GitLab supports an agent-based approach for GitOps","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682037/Blog/Hero%20Images/agent-based-gitops-cover-880x587.jpg","https://about.gitlab.com/blog/how-to-use-agent-based-gitops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use a pull-based (agent-based) approach for GitOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-06-23\",\n      }",{"title":3807,"description":3808,"authors":3813,"heroImage":3809,"date":3814,"body":3815,"category":14,"tags":3816},[1101],"2021-06-23","\n\nIn the previous post, titled [3 ways to approach GitOps](https://about.gitlab.com/blog/gitops-done-3-ways/), we discussed the many benefits and options that GitLab supports for fulfilling the [GitOps](/topics/gitops/) requirements of customers, whose IT environments are composed of heterogeneous technologies and infrastructures. This post begins a three-part series in which we delve deeper into these options; in this first part, we cover the pull-based or agent-based approach.\n\n## About a pull-based or agent-based approach\n\nIn this approach, an agent is installed in your infrastructure components to pull changes whenever there is a drift from the desired configuration, which resides in GitLab. Although the infrastructure components could be anything from a physical server or router to a VM or a database, we will focus on a Kubernetes cluster in this section.\n\nIn the following example, the [reconciliation loop](https://about.gitlab.com/solutions/gitops/) is made up of two components: an agent running on the Kubernetes cluster and a server-side service running on the GitLab instance. One of the benefits of this approach is that you don’t have to expose your Kubernetes clusters outside your firewall. Another benefit is its distributed architecture, in that the agents running on the infrastructure components are in charge of correcting any drift, relieving the server side of resource consumption. 
This approach does require the installation and maintenance of agents on all infrastructure components that you want to be part of your GitOps flows.\n\n### GitLab Agent for Kubernetes as a pull-based approach\n\n[Introduced](https://about.gitlab.com/releases/2020/09/22/gitlab-13-4-released/#introducing-the-gitlab-kubernetes-agent) as part of GitLab 13.4, the GitLab Agent for Kubernetes runs on your Kubernetes cluster and pulls changes in your infrastructure configuration from GitLab to your cluster, keeping your infrastructure configuration from drifting away from its desired state.\n\nGitLab Agent for Kubernetes (the feature) is currently implemented as two components ([architecture doc](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/master/doc/architecture.md)):\n\n- GitLab Agent for Kubernetes (agentk program): The component that users install into their cluster.\n\n- GitLab Agent for Kubernetes Server (kas program): The server-side counterpart that runs \"next to GitLab.\"\n\nThe high-level architecture of the GitLab Agent for Kubernetes is depicted below:\n\n![GitLab K8s agent high-level architecture](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/0-K8s-agent-arch.png){: .shadow.small.center.wrap-text}\nGitLab K8s agent high-level architecture.\n{: .note.text-center}\n\nThe **agentk** is installed on your Kubernetes cluster, and it is the component that applies updates to the infrastructure. The **kas** is installed on the GitLab instance; it manages the authentication and authorization between **agentk** instances and GitLab, monitors projects for changes, and gathers the latest project manifests to send to **agentk** instances.\n\n> **NOTE:** On GitLab.com, the **kas** is installed and maintained by GitLab. On self-managed instances, the customer needs to install it.\n\nIn the following self-managed instance example, we go through a GitOps flow that leverages the pull-based approach to GitOps. After the **agentk** component has been installed on the K8s cluster, the user proceeds to log on to the GitLab instance and creates a project called **gitops-project**:\n\n![Creating the gitops-project](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/1-create-gitops-proj.png){: .shadow.medium.center.wrap-text}\nCreating the gitops-project.\n{: .note.text-center}\n\nThe **gitops-project** project will be the one that is monitored, or observed, by the **kas** component. Then, under **gitops-project**, the user creates an empty manifest file called **manifest.yaml**. 
This is the manifest file that will contain the Infrastructure as Code configuration for this project:\n\n![Manifest file created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/2-manifest-file-created.png){: .shadow.medium.center.wrap-text}\nManifest file created.\n{: .note.text-center}\n\nNext, the user creates a Kubernetes agent configuration repository project, **kubernetes-agent**, which will contain information pertinent to the **kas** component.\n\n![Creating the kubernetes-agent project](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/3-create-K8s-agent-proj.png){: .shadow.medium.center.wrap-text}\nCreating the kubernetes-agent project.\n{: .note.text-center}\n\nWithin the **kubernetes-agent** project, the user creates a subdirectory **.gitlab/agents/agent1**, where **agent1** is the name given to this specific agent:\n\n![Config.yaml file created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/4-config-yaml-created.png){: .shadow.medium.center.wrap-text}\nConfig.yaml file created.\n{: .note.text-center}\n\nNotice that in the screenshot above, the project to be observed, **gitops-project**, was created in an earlier step.
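\n\nThe screenshot shows the contents of **config.yaml**. As a rough sketch based on the agent documentation at the time (the group path is illustrative), it points the agent at the manifest project it should observe:\n\n```yaml\n# .gitlab/agents/agent1/config.yaml\ngitops:\n  manifest_projects:\n    # Full path of the project that holds manifest.yaml\n    - id: \"your-group/gitops-project\"\n```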
\n\nThe next step consists of creating a GitLab Rails agent record to associate it with the Kubernetes agent configuration repository project. In the following screenshot, you see the commands that the user enters to first identify the task-runner pod, to log into it, to enter the Rails Console, and finally to create the agent record and a token for it:\n\n![Agent record created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/5-agent-record-created.png){: .shadow.medium.center.wrap-text}\nAgent record created.\n{: .note.text-center}\n\nIn the above screenshot, the last command uses the agent token to create a secret on the K8s cluster for secured communication between the **agentk** and the **kas** components.\n\nThe **agentk** pod creation on the K8s cluster is the next step. For this, the user creates a **resources.yml** file, in which the secured communication protocol between the **agentk** and the **kas** is specified as shown in the following snippet:\n\n![Websockets line](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/6-wss-line-in-resources-yml.png){: .shadow.medium.center.wrap-text}\nWebSockets communication specified in the resources.yml file.\n{: .note.text-center}\n\nIn the above snippet, the secured WebSockets protocol is used. GitLab also supports gRPC.\n\nOnce the **resources.yml** file is updated with the corresponding GitLab instance information, the user proceeds to create the pod:\n\n![Agentk pod created](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/7-agentk-created.png){: .shadow.medium.center.wrap-text}\nCreation of the **agentk** pod.\n{: .note.text-center}\n\nIn the screenshot above, you can see the execution of the **kubectl apply** that created the **agentk** pod in the K8s cluster.\n\nNow that the **agentk** and **kas** have been installed and are communicating securely with each other, the user can start performing some GitOps flows. Although the [GitLab Flow](https://about.gitlab.com/topics/version-control/what-is-gitlab-flow/) is the recommended approach for DevOps, it is also applicable to GitOps flows; after all, GitOps is all about applying the goodness of DevOps to managing [Infrastructure as Code](/topics/gitops/infrastructure-as-code/).\n\nThis means that the user should create an issue and then a merge request, in which all stakeholders can collaborate towards the resolution of the issue. For the sake of brevity, in this technical blog post, we will skip all these steps and show you how updates to the Infrastructure as Code configuration files are automatically applied to the infrastructure components.\n\nNOTE: Fostering collaboration is a great benefit of GitOps. For more information on this, check out this short [tech video](https://youtu.be/onFpj_wvbLM).\n\nFor example, the user can start making updates to the **manifest.yaml** file under the **gitops-project**, which is being observed by the **kas** component. Here you can see the user has pasted content into this file:\n\n![Manifest.yaml file updated](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/8-manifest-yaml-updated.png){: .shadow.medium.center.wrap-text}\nManifest.yaml file updated.\n{: .note.text-center}
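\n\nThe pasted content is a standard Kubernetes Deployment manifest. A minimal sketch matching the flow described here (names and image tag are illustrative) declares two nginx replicas:\n\n```yaml\n# manifest.yaml in gitops-project\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-deployment\nspec:\n  # Two desired nginx instances; agentk applies this state to the cluster.\n  replicas: 2\n  selector:\n    matchLabels:\n      app: nginx\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n        - name: nginx\n          image: nginx:1.21\n```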
\n\nRemember that this file had been created as an empty file. As soon as the user commits the changes displayed above, the **kas** component will detect the changes and communicate them to the **agentk** component, which is running on the K8s cluster. The **agentk** will immediately apply these changes to the infrastructure. In this example, the user has updated the infrastructure configuration file to have two instances of nginx. As shown in the screenshot below, the **agentk** has applied these updates by instantiating two nginx pods in the K8s cluster:\n\n![Two nginx pods up and running](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/9-two-nginx-running.png){: .shadow.medium.center.wrap-text}\nGitOps flow instantiates two nginx pods.\n{: .note.text-center}\n\nIf the user were to change the **manifest.yaml** file one more time and increment the replicas of the nginx pod to 3:\n\n![Manifest.yaml file updated with 3 nginx](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/10-manifest-yaml-updated-again.png){: .shadow.medium.center.wrap-text}\nManifest.yaml file updated with 3 nginx instances.\n{: .note.text-center}\n\nAgain, as soon as the commit takes place, the **kas** component detects the update and communicates this to the **agentk** component, which, in turn, spins up a third nginx pod in the K8s cluster:\n\n![Three nginx pods up and running](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/11-three-nginx-running.png){: .shadow.medium.center.wrap-text}\nGitOps flow instantiates a third nginx pod.\n{: .note.text-center}\n\nLastly, the user can check the log files of the different components, running on GKE in this example. In the following screenshot, the user can see the **kas** component running on the GitLab instance:\n\n![kas running on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/12-kas-on-GKE.png){: .shadow.medium.center.wrap-text}\nThe **kas** component running on GKE.\n{: .note.text-center}\n\nThen the user can drill down into the log of the **kas** component and see how it is detecting commits on the project it is observing:\n\n![kas log on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/13-kas-log-on-GKE.png){: .shadow.medium.center.wrap-text}\nThe **kas** log output on GKE.\n{: .note.text-center}\n\nLikewise, the user can navigate to the **agentk** component of the K8s cluster:\n\n![agentk running on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/14-agentk-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** component running on GKE.\n{: .note.text-center}\n\nAnd, again, drill down into its log to see how the **agentk** component runs synchronizations with the **kas** component:\n\n![agentk log on GKE](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/15-agentk-log-top-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** log output on GKE.\n{: .note.text-center}\n\nIn the following screenshot, the user sees the log statements indicating that the **agentk** is instantiating a third instance of the nginx pod:\n\n![agentk instantiating a third nginx pod](https://about.gitlab.com/images/blogimages/how-to-use-agent-based-gitops/16-agentk-log-synced-on-GitLab.png){: .shadow.medium.center.wrap-text}\nThe **agentk** instantiating a third nginx pod.\n{: .note.text-center}\n\nThe above sections described an example of the setup needed to install and run the GitLab Agent for Kubernetes, as well as how projects are monitored and synchronized from GitLab to a running K8s cluster.\n\n## Conclusion\n\nWe have gone over the setup and use of the Agent, which is an integral part of our pull-based or agent-based approach to GitOps. We also covered a GitOps flow that leveraged this agent-based approach, which is a good choice for Kubernetes shops that need to keep their clusters secured and behind their firewall. This approach comes with its drawbacks in that you need to maintain the agents, which also consume the resources of your infrastructure components. 
In part two of this series, we will discuss the push-based or agentless approach to GitOps.\n\nCover image by [Vincent Ledvina](https://unsplash.com/@vincentledvina?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/grand-tetons?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[535,749,832,894,2932],{"slug":3818,"featured":6,"template":678},"how-to-use-agent-based-gitops","content:en-us:blog:how-to-use-agent-based-gitops.yml","How To Use Agent Based Gitops","en-us/blog/how-to-use-agent-based-gitops.yml","en-us/blog/how-to-use-agent-based-gitops",{"_path":3824,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3825,"content":3831,"config":3836,"_id":3838,"_type":16,"title":3839,"_source":17,"_file":3840,"_stem":3841,"_extension":20},"/en-us/blog/how-to-become-more-productive-with-gitlab-ci",{"title":3826,"description":3827,"ogTitle":3826,"ogDescription":3827,"noIndex":6,"ogImage":3828,"ogUrl":3829,"ogSiteName":692,"ogType":693,"canonicalUrls":3829,"schema":3830},"How to become more productive with Gitlab CI","Explore some CI/CD strategies that can make your team more efficient and productive.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667358/Blog/Hero%20Images/gitlab-productivity.jpg","https://about.gitlab.com/blog/how-to-become-more-productive-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to become more productive with Gitlab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2021-06-21\",\n      }",{"title":3826,"description":3827,"authors":3832,"heroImage":3828,"date":3833,"body":3834,"category":14,"tags":3835},[1140],"2021-06-21","\nCI/CD pipelines are the preeminent solution to mitigate potential risks while integrating code changes into the repository. CI/CD pipelines help isolate the impact of potential errors, making it easier to fix them. Top that with a tool that provides effective visibility into the running tasks, and you have a recipe for success.\n\nSince the primary purpose of CI/CD pipelines is to speed up the development process and provide value to the end user faster, there's always room to make the process more efficient. This blog post unpacks some strategies that can help you get the most out of your pipeline definition in [GitLab CI](/solutions/continuous-integration/).\n\n## How Directed Acyclic Graphs (DAG) enable concurrent pipelines\n\n![By using the needs keyword you can define dependencies on jobs from previous stages.](https://about.gitlab.com/images/blogimages/dag-explained.jpeg)\nBy using the `needs:` keyword, you can define dependencies on jobs from previous stages.\n{: .note.text-center}\n\nIn a [basic-pipeline](https://docs.gitlab.com/ee/ci/pipelines/pipeline_architectures.html#basic-pipelines) structure, all the jobs in a particular stage run concurrently, and the jobs in the subsequent stage have to wait for those to finish before they can start. This continues for all the stages.\n\nIn the image above, the first job in the second stage only depends on the first two jobs in the first stage to get started. But with the basic pipeline order in place, it has to wait for all three jobs in the first stage to complete before it can start executing, which slows down the overall pipeline considerably. 
However, by using the `needs:` keyword, you can define direct dependencies for jobs, and each job then only has to wait for the jobs it depends on before it can start. By using the [DAG strategy](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/), you could shave a few minutes off the process for a certain project, thereby increasing the pipeline execution speed and bringing down the CI minutes consumption.\n\nBy using `needs: []`, you can make a job in any stage run immediately, as it doesn't have to wait for any other job to finish.\n\n## Why parallel jobs increase productivity\n\nNot all the jobs in a pipeline have an equal run-time. While some may take just a few seconds, some take much longer to finish. When there are many team members waiting on a running pipeline to finish to be able to make a contribution to the project, the productivity of the team takes a hit.\n\nGitLab provides a method to make clones of a job and run them in parallel for faster execution using the `parallel:` keyword. While [parallel jobs](https://docs.gitlab.com/ee/ci/yaml/#parallel) may not help in reducing the consumption of [CI minutes](/pricing/faq-compute-credit/), they definitely help increase work productivity.\n\n## Break down big pipelines with parallel matrix jobs\n\nBefore the release of [parallel matrix jobs](https://docs.gitlab.com/ee/ci/yaml/#parallel-matrix-jobs), in order to run multiple instances of a job with different variable values, the jobs had to be manually defined in the `.gitlab-ci.yml` like this:\n\n```yaml\n.run-test:\n  script: run-test $PLATFORM\n  stage: test\n\ntest-win:\n  extends: .run-test\n  variables:\n    PLATFORM: windows\ntest-mac:\n  extends: .run-test\n  variables:\n    PLATFORM: mac\ntest-linux:\n  extends: .run-test\n  variables:\n    PLATFORM: linux\n```\n\nParallel matrix jobs were released with GitLab 13.3 and allow you to create jobs at runtime based on specified variables. Let's say there is a need to run multiple instances of a job with different variable values for each instance - with a combination of `parallel:` and `matrix:` you accomplish just that.\n\n```yaml\ntest:\n  stage: test\n  script: run-test $PLATFORM\n  parallel:\n    matrix:\n      - PLATFORM: [windows, mac, linux]\n```\n\nBy using `parallel:` and `matrix:`, big pipelines can be broken down into manageable parts for efficient maintenance.\n\n## Reduce the risk of merge conflicts with parent/child pipelines\n\n![Parent-child pipelines can include external YAML files in your configuration](https://about.gitlab.com/images/blogimages/parent-child-explained.jpeg)\nThe parent pipeline generates a child pipeline via the trigger:include keywords.\n{: .note.text-center}\n\nFor better management of dependencies, many organizations prefer a mono-repo setup for their projects. But mono-repos have a flip side too. If a repository hosts a large number of projects and a single pipeline definition is used to trigger different automated processes for different components, the pipeline performance is negatively affected. By using [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) you can design more efficient pipelines, since you can have multiple child pipelines that run in parallel. The keyword `include:` is used to include external YAML files in your CI/CD configuration for this purpose. In the image above, a pipeline (the parent) generates a child pipeline via the `trigger:include` keywords.
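\n\nAs a minimal sketch (directory and job names are illustrative), the parent `.gitlab-ci.yml` triggers child pipelines defined in separate files, so each component's jobs run in their own pipeline and only when that component changes:\n\n```yaml\n# Parent .gitlab-ci.yml at the mono-repo root\ntrigger-service-a:\n  trigger:\n    include:\n      - local: service-a/.gitlab-ci.yml\n  rules:\n    - changes:\n        - service-a/**/*\n\ntrigger-service-b:\n  trigger:\n    include:\n      - local: service-b/.gitlab-ci.yml\n  rules:\n    - changes:\n        - service-b/**/*\n```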
\n\nThis approach also reduces the chances of merge conflicts, as it allows you to edit only a section of the pipeline configuration when necessary.\n\n## Merge trains help the target branch stay stable\n\nWhen there are a lot of merge requests flowing into a project, there is a risk of merge conflicts. [Merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) are a powerful GitLab feature that allows users to automatically merge a series of (queued) merge requests without breaking the target branch. Using this feature, you can add an MR to the train, and the train takes care of it until it is merged.\n\n## Use multiple caches in the same job\n\nStarting with GitLab 13.11, GitLab CI/CD provides the ability to [configure multiple cache keys in a single job](/releases/2021/04/22/gitlab-13-11-released/#use-multiple-caches-in-the-same-job), which will help you increase your pipeline performance. This functionality could help you save precious development time when the jobs are running.\n\n## How can an efficient pipeline save you money?\n\nBy using CI/CD strategies that ensure safe merging of new changes and a green master, organizations can worry less about unanticipated downtimes caused by infrastructural failures and code conflicts.\n\nWith faster pipelines, developers end up spending less time on maintenance and find the time and space to bring more thoughtfulness and creativity to their work, leading to improvements in code quality as well as company atmosphere and morale.\n\nIf you are looking to bring down the cost of running your CI/CD pipelines for a large project, look up the [Artifact and cache settings](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#artifact-and-cache-settings) and [Optimizing GitLab for large repositories](https://docs.gitlab.com/ee/ci/large_repositories/) sections in the documentation.\n",[832,937,749,915],{"slug":3837,"featured":6,"template":678},"how-to-become-more-productive-with-gitlab-ci","content:en-us:blog:how-to-become-more-productive-with-gitlab-ci.yml","How To Become More Productive With Gitlab Ci","en-us/blog/how-to-become-more-productive-with-gitlab-ci.yml","en-us/blog/how-to-become-more-productive-with-gitlab-ci",{"_path":3843,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3844,"content":3850,"config":3855,"_id":3857,"_type":16,"title":3858,"_source":17,"_file":3859,"_stem":3860,"_extension":20},"/en-us/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale",{"title":3845,"description":3846,"ogTitle":3845,"ogDescription":3846,"noIndex":6,"ogImage":3847,"ogUrl":3848,"ogSiteName":692,"ogType":693,"canonicalUrls":3848,"schema":3849},"The next step in performance testing? The GitLab Environment Toolkit","Learn how we're building a new toolkit to help with performance testing and deploying GitLab at scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682030/Blog/Hero%20Images/gitlab_environment_toolkit_scale.jpg","https://about.gitlab.com/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The next step in performance testing? 
The GitLab Environment Toolkit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grant Young\"}],\n        \"datePublished\": \"2021-06-15\",\n      }",{"title":3845,"description":3846,"authors":3851,"heroImage":3847,"date":3852,"body":3853,"category":14,"tags":3854},[911],"2021-06-15","\n\nLast year I wrote about how the [Quality Engineering Enablement team](/handbook/engineering/quality/) was [building up the performance testing of GitLab](/blog/how-were-building-up-performance-testing-of-gitlab/) with the [GitLab Performance Tool (GPT)](https://gitlab.com/gitlab-org/quality/performance). Last year, the biggest challenge with performance testing wasn't so much the testing but rather setting up the right large scale GitLab environments to test against.\n\nLike any server application, deploying at scale is challenging. That's why we built another toolkit that automates the deployment of GitLab at scale: The [GitLab Environment Toolkit (GET)](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit).\n\n![GitLab Environment Toolkit logo](https://about.gitlab.com/images/blogimages/gitlab-environment-toolkit/gitlab_environment_toolkit_logo.png){: .center}\nGitLab Environment Toolkit logo\n{: .note.text-center}\n\nInternally called the \"Performance Environment Builder\" (PEB), GET grew alongside GPT as we continued to expand our performance testing efforts. Over time we built a toolkit that was quite capable in its own right of deploying GitLab at scale, which is why it started to gain attention internally from other teams and then even from some customers. Soon we realized we built something worth sharing.\n\nThe Quality Engineering Enablement team has been working hard over the last few months to polish the toolkit for broader use and we're happy to share that the first version of [GET v1.0.0](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/releases/v1.0.0) has been released!\n\nGET is a collection of well-known open source provisioning and configuration tools with a simple focused purpose - to deploy [GitLab Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab) and [GitLab Helm Charts](https://docs.gitlab.com/charts/) at scale, as defined by our [Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures) and [Cloud Native Hybrid Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative). Built with Terraform and Ansible, GET supports the provisioning and configuring of machines and other related infrastructure and contains the following features:\n\n - Support for deploying all GitLab Reference Architectures sizes dynamically from 1000 to 50,000\n - Support for deploying Cloud Native Hybrid Reference Architectures (GCP only at this time)\n - GCP, AWS, and Azure cloud provider support\n - Upgrades\n - Release and nightly Omnibus builds support\n - Advanced search with Elasticsearch\n - Geo support\n - Zero Downtime Upgrades support\n - Built-in load balancing via HAProxy and monitoring (Prometheus, Grafana) support\n\nWe're just getting started with GET, and [continue to add more support for features and different environment setups](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/boards?group_by=epic). 
Now that GET [v1.0.0](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/releases/v1.0.0) has been released, we're at a good place for customers to start trialing and evaluating GET. We do ask that you take into consideration the continuing expansion of capabilities, as well as the limitations of the current version.\n\nRead on to learn about the philosophy of GET and how it works.\n\n## The design principles of GET\n\nOur team has past experience with provisioning and configuration tools, so we've learned what does and does not work, which is why we try to stick to the following goals:\n\n- GET is [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions): The word boring may look funny here but it's actually a [GitLab value](https://handbook.gitlab.com/handbook/values/). A boring solution essentially means keeping it simple. Provisioning and configuration solutions can get complicated **fast** with many common pitfalls, such as trying to support complex setups that come with a heavy maintenance cost. From the very beginning we've tried to avoid this, so GET essentially uses a standard Terraform and Ansible config that doesn't try to do anything fancy or complicated.\n- GET is *not* a replacement for [GitLab Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab) or the [Helm Charts](https://docs.gitlab.com/charts/): Truly, some of the greatest \"magic\" in setting up GitLab is how much easier Omnibus and the Helm Charts have made it. Thanks to the incredible work by our Distribution teams, both of these install methods do a lot under the hood, and GET is not trying to replace them. In the same [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions) vein, GET's purpose is simply to set up GitLab environments at scale by installing Omnibus or Helm in the right places (along with any other infrastructure needed to support them).\n- GET is one for all and designed to work for all our recommended [GitLab Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures/). Everything we do with GET has to be considered against this goal. It means we may not be able to support niche or overly complex setups, as this would lead to complex code and heavy maintenance costs. We do aim to support recommended customizations where appropriate.\n\nNext, we'll look at how GET works at a high level, starting with provisioning with Terraform.\n\n## Provisioning the environment with Terraform\n\nThe first step to building an environment is to provision the machines and/or Kubernetes clusters that run GitLab. We handle this process with the well-known provisioning tool [Terraform](https://www.terraform.io/).\n\nWe've created multiple [Terraform modules](https://www.terraform.io/docs/language/modules/develop/index.html) in GET for each of the big three cloud providers (GCP, AWS, and Azure) that provision machines for you, according to the appropriate [reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/), along with the necessary supporting infrastructure, such as firewalls, load balancers, etc. We designed these modules to be as simple as possible and to require only minimal configuration.\n\nFor more information on the entire Terraform configuration, [check out our docs](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/blob/master/docs/environment_provision.md). An example of one of the main config files is `environment.tf`, which defines how each component's nodes should be set up.\n
Below is an example of how it is configured with GCP for a [10k reference architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html) environment:\n\n```tf\nmodule \"gitlab_ref_arch_gcp\" {\n  source = \"../../modules/gitlab_ref_arch_gcp\"\n\n  prefix = var.prefix\n  project = var.project\n\n  object_storage_buckets = [\"artifacts\", \"backups\", \"dependency-proxy\", \"lfs\", \"mr-diffs\", \"packages\", \"terraform-state\", \"uploads\"]\n\n  # 10k\n  consul_node_count = 3\n  consul_machine_type = \"n1-highcpu-2\"\n\n  elastic_node_count = 3\n  elastic_machine_type = \"n1-highcpu-16\"\n\n  gitaly_node_count = 3\n  gitaly_machine_type = \"n1-standard-16\"\n\n  praefect_node_count = 3\n  praefect_machine_type = \"n1-highcpu-2\"\n\n  praefect_postgres_node_count = 1\n  praefect_postgres_machine_type = \"n1-highcpu-2\"\n\n  gitlab_nfs_node_count = 1\n  gitlab_nfs_machine_type = \"n1-highcpu-4\"\n\n  gitlab_rails_node_count = 3\n  gitlab_rails_machine_type = \"n1-highcpu-32\"\n\n  haproxy_external_node_count = 1\n  haproxy_external_machine_type = \"n1-highcpu-2\"\n  haproxy_external_external_ips = [var.external_ip]\n  haproxy_internal_node_count = 1\n  haproxy_internal_machine_type = \"n1-highcpu-2\"\n\n  monitor_node_count = 1\n  monitor_machine_type = \"n1-highcpu-4\"\n\n  pgbouncer_node_count = 3\n  pgbouncer_machine_type = \"n1-highcpu-2\"\n\n  postgres_node_count = 3\n  postgres_machine_type = \"n1-standard-4\"\n\n  redis_cache_node_count = 3\n  redis_cache_machine_type = \"n1-standard-4\"\n  redis_sentinel_cache_node_count = 3\n  redis_sentinel_cache_machine_type = \"n1-standard-1\"\n  redis_persistent_node_count = 3\n  redis_persistent_machine_type = \"n1-standard-4\"\n  redis_sentinel_persistent_node_count = 3\n  redis_sentinel_persistent_machine_type = \"n1-standard-1\"\n\n  sidekiq_node_count = 4\n  sidekiq_machine_type = \"n1-standard-4\"\n}\n\noutput \"gitlab_ref_arch_gcp\" {\n  value = module.gitlab_ref_arch_gcp\n}\n```\n\nWith this environment and [two other small config files in place](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/blob/master/docs/environment_provision.md#2-setup-the-environments-config), Terraform can be run normally and work its magic. Below is a snippet of the output you'll see with GCP:\n\n```\n[...]\n\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[2]: Creating...\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[2]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[0]: Still creating... 
[10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.consul.google_compute_instance.gitlab[1]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[1]: Creating...\nmodule.gitlab_ref_arch_gcp.module.gitlab_nfs.google_compute_instance.gitlab[0]: Creation complete after 25s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[1]: Creating...\nmodule.gitlab_ref_arch_gcp.module.gitaly.google_compute_instance.gitlab[1]: Creation complete after 14s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[2]: Creating...\nmodule.gitlab_ref_arch_gcp.module.gitaly.google_compute_instance.gitlab[0]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[0]: Creating...\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[0]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[1]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[2]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.pgbouncer.google_compute_instance.gitlab[0]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[0]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.gitaly.google_compute_instance.gitlab[2]: Still creating... [20s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[2]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[1]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[1]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[2]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[0]: Still creating... [10s elapsed]\nmodule.gitlab_ref_arch_gcp.module.gitaly.google_compute_instance.gitlab[2]: Creation complete after 25s\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[2]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_sentinel_cache.google_compute_instance.gitlab[1]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[1]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[0]: Creation complete after 15s\nmodule.gitlab_ref_arch_gcp.module.redis_persistent.google_compute_instance.gitlab[2]: Creation complete after 15s\nReleasing state lock. This may take a few moments...\n\nApply complete! Resources: 90 added, 0 changed, 0 destroyed.\n```\n\nOnce it's done, you should have a full set of machines for GitLab that can be configured with Ansible, which is what we'll look at next.\n\n## How to configure the environment with Ansible\n\nThe next step for setting up the environment is configuring [Ansible](https://www.ansible.com/). 
In a nutshell, this tool connects to each machine via SSH and runs tasks to configure GitLab.\n\nAs with Terraform, [we've created multiple roles](https://docs.ansible.com/ansible/latest/user_guide/playbooks_reuse_roles.html) and [Playbooks](https://docs.ansible.com/ansible/latest/user_guide/playbooks_intro.html) in GET that are designed to configure each component on the intended machine. Through Terraform, we apply labels to each machine, which Ansible then tracks using its [dynamic inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_dynamic_inventory.html) to define the purpose of each machine.\n\nLike we did before with Terraform, we'll highlight one of the main config files here; you can see the full configuration process in the [docs](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/blob/master/docs/environment_configure.md). The file is `vars.yml`, an inventory variable file for your environment that contains various parts of the config Ansible needs to perform the setup, along with key GitLab config:\n\n```yml\nall:\n  vars:\n    # Ansible Settings\n    ansible_user: \"\u003Cssh_username>\"\n    ansible_ssh_private_key_file: \"\u003Cprivate_ssh_key_path>\"\n\n    # Cloud Settings\n    cloud_provider: \"gcp\"\n    gcp_project: \"\u003Cgcp_project_id>\"\n    gcp_service_account_host_file: \"\u003Cgcp_service_account_host_file_path>\"\n\n    # General Settings\n    prefix: \"\u003Cenvironment_prefix>\"\n    external_url: \"\u003Cexternal_url>\"\n    gitlab_license_file: \"\u003Cgitlab_license_file_path>\"\n\n    # Object Storage Settings\n    gitlab_object_storage_artifacts_bucket: \"{{ prefix }}-artifacts\"\n    gitlab_object_storage_backups_bucket: \"{{ prefix }}-backups\"\n    gitlab_object_storage_dependency_proxy_bucket: \"{{ prefix }}-dependency-proxy\"\n    gitlab_object_storage_external_diffs_bucket: \"{{ prefix }}-mr-diffs\"\n    gitlab_object_storage_lfs_bucket: \"{{ prefix }}-lfs\"\n    gitlab_object_storage_packages_bucket: \"{{ prefix }}-packages\"\n    gitlab_object_storage_terraform_state_bucket: \"{{ prefix }}-terraform-state\"\n    gitlab_object_storage_uploads_bucket: \"{{ prefix }}-uploads\"\n\n    # Passwords / Secrets - Can also be set as Environment Variables via ansible.builtin.env\n    gitlab_root_password: \"\u003Cgitlab_root_password>\"\n    grafana_password: \"\u003Cgrafana_password>\"\n    postgres_password: \"\u003Cpostgres_password>\"\n    consul_database_password: \"\u003Cconsul_database_password>\"\n    gitaly_token: \"\u003Cgitaly_token>\"\n    pgbouncer_password: \"\u003Cpgbouncer_password>\"\n    redis_password: \"\u003Credis_password>\"\n    praefect_external_token: \"\u003Cpraefect_external_token>\"\n    praefect_internal_token: \"\u003Cpraefect_internal_token>\"\n    praefect_postgres_password: \"\u003Cpraefect_postgres_password>\"\n```\n\nWith the variable file and the [environment inventory\n
configured](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/blob/master/docs/environment_configure.md#2-setup-the-environments-dynamic-inventory) Ansible can run normally. Here is a snippet of the output you'll see with GCP:\n\n```\n[...]\n\nTASK [gitlab-rails : Update Postgres primary IP and Port] **********************\nok: [gitlab-qa-10k-gitlab-rails-1]\nTASK [gitlab-rails : Setup GitLab deploy node config file with DB Migrations] ***\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nTASK [gitlab-rails : Reconfigure GitLab deploy node] ***************************\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nTASK [gitlab-rails : Setup all GitLab Rails config files] **********************\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nok: [gitlab-qa-10k-gitlab-rails-3]\nok: [gitlab-qa-10k-gitlab-rails-2]\nTASK [gitlab-rails : Reconfigure all GitLab Rails] *****************************\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nchanged: [gitlab-qa-10k-gitlab-rails-3]\nchanged: [gitlab-qa-10k-gitlab-rails-2]\nTASK [gitlab-rails : Restart GitLab] *******************************************\nchanged: [gitlab-qa-10k-gitlab-rails-3]\nchanged: [gitlab-qa-10k-gitlab-rails-1]\nchanged: [gitlab-qa-10k-gitlab-rails-2]\n\n[...]\n\nPLAY RECAP *********************************************************************\ngitlab-qa-10k-consul-1     : ok=29   changed=17   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-consul-2     : ok=28   changed=16   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-consul-3     : ok=28   changed=16   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-elastic-1    : ok=41   changed=9    unreachable=0    failed=0    skipped=61   rescued=0    ignored=0\ngitlab-qa-10k-elastic-2    : ok=37   changed=7    unreachable=0    failed=0    skipped=62   rescued=0    ignored=0\ngitlab-qa-10k-elastic-3    : ok=37   changed=7    unreachable=0    failed=0    skipped=62   rescued=0    ignored=0\ngitlab-qa-10k-gitaly-1     : ok=27   changed=15   unreachable=0    failed=0    skipped=30   rescued=0    ignored=0\ngitlab-qa-10k-gitaly-2     : ok=26   changed=14   unreachable=0    failed=0    skipped=30   rescued=0    ignored=0\ngitlab-qa-10k-gitaly-3     : ok=26   changed=14   unreachable=0    failed=0    skipped=30   rescued=0    ignored=0\ngitlab-qa-10k-gitlab-nfs-1 : ok=28   changed=7    unreachable=0    failed=0    skipped=55   rescued=0    ignored=0\ngitlab-qa-10k-gitlab-rails-1 : ok=41   changed=21   unreachable=0    failed=0    skipped=32   rescued=0    ignored=0\ngitlab-qa-10k-gitlab-rails-2 : ok=35   changed=16   unreachable=0    failed=0    skipped=33   rescued=0    ignored=0\ngitlab-qa-10k-gitlab-rails-3 : ok=35   changed=16   unreachable=0    failed=0    skipped=33   rescued=0    ignored=0\ngitlab-qa-10k-haproxy-external-1 : ok=40   changed=8    unreachable=0    failed=0    skipped=62   rescued=0    ignored=0\ngitlab-qa-10k-haproxy-internal-1 : ok=39   changed=8    unreachable=0    failed=0    skipped=60   rescued=0    ignored=0\ngitlab-qa-10k-monitor-1    : ok=43   changed=19   unreachable=0    failed=0    skipped=35   rescued=0    ignored=0\ngitlab-qa-10k-pgbouncer-1  : ok=30   changed=17   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-pgbouncer-2  : ok=29   changed=16   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-pgbouncer-3  : ok=29   changed=16   unreachable=0    failed=0    skipped=28   rescued=0    
ignored=0\ngitlab-qa-10k-postgres-1   : ok=35   changed=16   unreachable=0    failed=0    skipped=36   rescued=0    ignored=0\ngitlab-qa-10k-postgres-2   : ok=34   changed=15   unreachable=0    failed=0    skipped=36   rescued=0    ignored=0\ngitlab-qa-10k-postgres-3   : ok=34   changed=15   unreachable=0    failed=0    skipped=36   rescued=0    ignored=0\ngitlab-qa-10k-praefect-1   : ok=29   changed=18   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-praefect-2   : ok=26   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-praefect-3   : ok=26   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-praefect-postgres-1 : ok=25   changed=14   unreachable=0    failed=0    skipped=29   rescued=0    ignored=0\ngitlab-qa-10k-redis-cache-1 : ok=26   changed=15   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-cache-2 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-cache-3 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-persistent-1 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-persistent-2 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-persistent-3 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-cache-1 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-cache-2 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-cache-3 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-persistent-1 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-persistent-2 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-redis-sentinel-persistent-3 : ok=25   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-sidekiq-1    : ok=28   changed=15   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-sidekiq-2    : ok=27   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-sidekiq-3    : ok=27   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\ngitlab-qa-10k-sidekiq-4    : ok=27   changed=14   unreachable=0    failed=0    skipped=28   rescued=0    ignored=0\nlocalhost                  : ok=18   changed=3    unreachable=0    failed=0    skipped=38   rescued=0    ignored=0\n```\n\nOnce Ansible is done, you should have a fully running GitLab environment at scale!\n\n## What's next?\n\nWe've got a bunch of things planned for GET so it can support more features when setting up GitLab, such as SSL support, [cloud native hybrid architectures](/blog/cloud-native-architectures-made-easy/) on other cloud providers, object storage customization, and much more. 
We know deploying production-ready server applications is hard and has many potential requirements depending on the customer, and we hope to eventually support all recommended setups.\n\nCheck out the [GET development board](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/boards?group_by=epic) and our [issue list](https://gitlab.com/gitlab-org/quality/gitlab-environment-toolkit/-/issues) to see what is in progress. Share feedback and suggestions by adding to our issue list; we're keen to hear what's important to customers.\n\n[Cover image](https://unsplash.com/photos/icdVDptHxpM) by [Jean Vella](https://unsplash.com/@jean_vella?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[915,704],{"slug":3856,"featured":6,"template":678},"why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale","content:en-us:blog:why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale.yml","Why We Are Building The Gitlab Environment Toolkit To Help Deploy Gitlab At Scale","en-us/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale.yml","en-us/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale",{"_path":3862,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3863,"content":3869,"config":3875,"_id":3877,"_type":16,"title":3878,"_source":17,"_file":3879,"_stem":3880,"_extension":20},"/en-us/blog/how-to-get-gitops-right-with-iac-security",{"title":3864,"description":3865,"ogTitle":3864,"ogDescription":3865,"noIndex":6,"ogImage":3866,"ogUrl":3867,"ogSiteName":692,"ogType":693,"canonicalUrls":3867,"schema":3868},"How to get GitOps right with infrastructure as code security","Learn how the GitLab and Indeni integration makes security a core component of your GitOps workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663403/Blog/Hero%20Images/gitops-partner-cover-image.jpg","https://about.gitlab.com/blog/how-to-get-gitops-right-with-iac-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get GitOps right with infrastructure as code security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ulrica de Fort-Menares\"}],\n        \"datePublished\": \"2021-06-10\",\n      }",{"title":3864,"description":3865,"authors":3870,"heroImage":3866,"date":3872,"body":3873,"category":14,"tags":3874},[3871],"Ulrica de Fort-Menares","2021-06-10","\nIn today's competitive digital era, organizations must undergo a digital transformation to compete effectively. For many, achieving a digital transformation means transitioning toward a DevOps model.\n\nDevOps has been around for many years, and the development side of the house has benefitted from its core practices. However, the infrastructure side has been lagging behind, particularly when it comes to speed.\n\n
With [infrastructure as code (IaC)](/topics/gitops/infrastructure-as-code/) and [GitOps](/topics/gitops/), infrastructure teams have been able to apply the same disciplines and quality gates used to manage application code to the infrastructure - to deliver products faster, with more predictability, and at scale.\n\n## Security slowing down delivery\n\nWhile the GitOps concept promises faster and more frequent deployment, the last thing you want is to be slowed down by your legacy security programs. How often has your release stopped near the end of the process because it failed the security gate? All too often, security testing is tacked on at the end of delivery. Developers inevitably spend significant time and energy investigating these security issues, which delays the release. Issues uncovered late in the cycle are expensive and painful to fix, not to mention a source of unnecessary stress.\n\nThe software development process has been shifting left to deliver better-quality software faster. By using IaC, you can adopt the same DevOps principle for the infrastructure. Learning from the development world, you should integrate security controls into the development lifecycle early and everywhere.\n\n## How to shift your IaC security checks left\n\nThe core of the partnership between Indeni and GitLab is about making security a key part of the GitOps practice. The [Indeni Cloudrail](https://indeni.com/cloudrail/) and GitLab CI/CD integration brings IaC security into the tools that developers are familiar with and want to use.\n\n![GitOps workflow](https://about.gitlab.com/images/blogimages/secure-gitops-workflow.png){: .shadow}\nHow GitLab CI/CD fits into the Indeni Cloudrail DevOps workflow.\n{: .note.text-center}\n\nThe joint solution modernizes security programs with the shift-left approach and automates infrastructure compliance. Developers no longer need to get in line for security reviews. Instead, IaC will be automatically evaluated for security impacts. Security controls are integrated into the development lifecycle before deployment.\n\n![GitOps workflow](https://about.gitlab.com/images/blogimages/secure-gitops1.jpg){: .shadow}\nCatching IaC security violations in GitLab CI/CD.\n{: .note.text-center}\n\nAs shown in the example above, Indeni Cloudrail provides feedback in GitLab CI. This way, infrastructure security risks can be remediated as soon as they are introduced, so developers can keep moving fast. You can think of the shift-left approach as testing IaC continuously and preventing insecure infrastructure from being deployed.\n\n## Don't let those noisy security tools impede your GitOps practice\n\nSecurity tools are notorious for being noisy with their many false positives. According to the Advanced Technology Academic Research Center [(ATARC) Federal DevSecOps Landscape survey](https://atarc.org/project/devsecops-survey/), too many false positives are the number one frustration with security testing. A noisy security tool can be counterproductive, inadvertently stopping the pipeline and frustrating your developers.\n\nWhat makes Indeni Cloudrail unique is its context-based analysis: its ability to understand the relationships among cloud resources, which makes in-depth security analysis possible. Cloudrail also factors in the resources that already exist in the cloud environment to gain a holistic view as part of its analysis. The end result is three times less noise than comparable IaC security tools on the market.\n\n
In essence, Cloudrail will only bother developers with problems that truly matter to the organization. Learn more about [what makes Cloudrail unique in this blog post](https://indeni.com/blog/comparing-cloudrail-checkov-tfsec-and-kics-with-testing/).\n\n## Why GitLab and Indeni are better together\n\nA developer-centric security tool for IaC gives security a better chance of gaining acceptance in the developer community. Together, Indeni and GitLab equip developers with the right tools to support a GitOps model and help organizations with their digital transformation.\n\n## Watch the demo\n\nWatch the Cloudrail demo to see the GitOps workflow for IaC security.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/9WSd0D87Vxc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### About Indeni\n\n_[Indeni](https://indeni.com/) automates best practices for network security and cloud security. Its security infrastructure platform automates health and compliance checks for leading firewalls to maximize uptime and efficiency. Its Infrastructure-as-Code security analysis tool, Cloudrail, automates infrastructure compliance to prevent insecure cloud environments from being deployed._\n\nCover image by [Dimitry Anikin](https://unsplash.com/@anikinearthwalker) on [Unsplash](https://unsplash.com/photos/DsmjpJzm2i0)\n",[535,1307,894,232],{"slug":3876,"featured":6,"template":678},"how-to-get-gitops-right-with-iac-security","content:en-us:blog:how-to-get-gitops-right-with-iac-security.yml","How To Get Gitops Right With Iac Security","en-us/blog/how-to-get-gitops-right-with-iac-security.yml","en-us/blog/how-to-get-gitops-right-with-iac-security",{"_path":3882,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3883,"content":3889,"config":3896,"_id":3898,"_type":16,"title":3899,"_source":17,"_file":3900,"_stem":3901,"_extension":20},"/en-us/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler",{"title":3884,"description":3885,"ogTitle":3884,"ogDescription":3885,"noIndex":6,"ogImage":3886,"ogUrl":3887,"ogSiteName":692,"ogType":693,"canonicalUrls":3887,"schema":3888},"How to protect your source code with GitLab and Jscrambler","Learn how to seamlessly protect your source code at build time in just a few steps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669529/Blog/Hero%20Images/gitlab-jscrambler-blog-post-protecting-source-code.png","https://about.gitlab.com/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to protect your source code with GitLab and Jscrambler\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pedro Fortuna\"},{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2021-06-09\",\n      }",{"title":3884,"description":3885,"authors":3890,"heroImage":3886,"date":3893,"body":3894,"category":14,"tags":3895},[3891,3892],"Pedro Fortuna","Sam Kerr","2021-06-09","\nDevelopment teams are building, testing, and shipping code faster than ever before.\n
Today, we know that security has a role to play at the early stages of the [DevOps workflow](/topics/devops/), but these security controls are mostly centered around finding and fixing bugs and vulnerabilities during development.\n\nIn this tutorial, we will explore the importance of protecting client-side application code at runtime and guide you through implementing it in your GitLab instance using the integration with [Jscrambler](https://jscrambler.com/).\n\n## The importance of runtime code protection\n\nWith web and mobile applications dealing with increasingly sensitive data, addressing the application's attack surface requires considering additional threats that are not directly linked to vulnerabilities.\n\nThis concern has been widely covered in NIST, ISO 27001, and some of the latest iterations of OWASP guides, such as the [Mobile Application Security Verification Standard](https://mobile-security.gitbook.io/masvs/). These information security standards highlight that attackers who gain unwarranted access to the application's source code may be able to retrieve proprietary code, find ways to bypass app restrictions, and make more progress while planning/automating data exfiltration attacks.\n\nAs such, it's important that companies implement an additional security layer (on top of application security best practices) to tackle the threats of tampering and reverse engineering of an application's source code.\n\n## Getting started with Jscrambler + GitLab\n\nA robust code protection approach must include multiple layers to raise the bar for reverse-engineering and tampering attempts. Jscrambler achieves this by using a combination of code protection techniques, including obfuscation, code locks, runtime protection, and threat monitoring.\n\nLet's see how you can easily set up this layered source code protection using Jscrambler in your GitLab instance.\n\n### What you need for the Jscrambler integration\n\nTo use this integration with Jscrambler, make sure that you meet the following prerequisites:\n\n* A JavaScript-based project, as Jscrambler can protect JavaScript-based web and hybrid mobile apps\n* A [Jscrambler account](https://jscrambler.com/signup)\n* A GitLab instance where the Jscrambler integration will run\n\n### How to configure Jscrambler\n\nThe first step of this integration is to define the Jscrambler code protection techniques you want to use. The best way to do this is through the [Jscrambler web app](https://app.jscrambler.com/). You can either select one of the pre-defined templates or pick techniques one by one. Review [the Jscrambler guide](https://blog.jscrambler.com/jscrambler-101-first-use/) for further instructions on choosing Jscrambler techniques. No matter what you choose, download Jscrambler's JSON configuration file by clicking the download button next to the Application Settings, as shown below.\n\n![Jscrambler_download_JSON](https://about.gitlab.com/images/blogimages/jscrambler-app-download-json.gif \"How to download Jscrambler's JSON config.\")\nHow to download Jscrambler's JSON config.\n{: .note.text-center}\n\nPlace the file you just downloaded in your project's root folder and rename it to `.jscramblerrc`. Now, open the file and remove the access and secret keys from the configuration by deleting the following lines:\n\n```json\n \"keys\": {\n   \"accessKey\": \"***********************\",\n   \"secretKey\": \"***********************\"\n },\n```\n\nThis prevents hardcoding API keys, which would pose a security risk.\n\n
You should store these API keys using the [GitLab CI environment variables](https://docs.gitlab.com/ee/ci/variables/), as shown below.\n\n![Jscrambler API keys as GitLab environment variables](https://docs.jscrambler.com/637a78d94e016c8be1866edb0627f2bc.png)\nWhere to store Jscrambler's API keys in GitLab.\n{: .note.text-center}\n\nAnd that's all you need from Jscrambler's side!\n\n### Configuring a Jscrambler job inside GitLab CI\n\nStart by checking that you have placed the `.gitlab-ci.yml` file at the root of your project. Inside this file, you will need to define your `build` stage, as well as add a new `protect` stage, as shown below.\n\n```yml\nstages:\n - build\n - protect\n # - deploy\n # ...\n```\n\nThe `build` stage should be configured as follows:\n\n```yml\nbuild:production:\n stage: build\n artifacts:\n   when: on_success\n   paths:\n     - build\n script:\n   - npm i\n   - npm run build\n```\n\nThis configuration will run the `npm run build` command, which is a standard way of building your app to production, placing the resulting production files in the `/build` folder. Plus, it ensures that the `/build` folder becomes available as a [GitLab CI artifact](https://docs.gitlab.com/ee/ci/pipelines/job_artifacts.html) so that it can be used later in other jobs.\n\nHere, make sure that you set the build commands and build folder according to your own project, as these may vary.\n\nNext, configure the `protect` stage as shown below:\n\n```yml\nbuild:production:obfuscated:\n stage: protect\n before_script:\n   - npm i -g jscrambler\n dependencies:\n   - build:production\n artifacts:\n   name: \"$CI_JOB_NAME\"\n   when: on_success\n   paths:\n     - build\n   expire_in: 1 week\n script:\n   # By default, all artifacts from previous stages are passed to each job.\n   - jscrambler -a $JSCRAMBLER_ACCESS_KEY -s $JSCRAMBLER_SECRET_KEY -o ./ build/**/*.*\n```\n\nThis stage starts by installing the Jscrambler npm package globally. Next, it is configured to execute Jscrambler at the end of each new production build process. Typically, you will want to ensure that Jscrambler is the last stage of your build process, because Jscrambler transforms the source code extensively and can also add [anti-tampering protections](https://docs.jscrambler.com/code-integrity/documentation/transformations/self-defending). This means changing the files after they have been protected by Jscrambler may break the app functionality.\n\nThis `protect` stage is configured to access the Jscrambler API keys that have been loaded as GitLab environment variables. Finally, the output of the protection is placed into the same `/build` folder and made available as a GitLab CI artifact for later use (e.g., a deploy job).\n\nNote that while this example shows how to use the Jscrambler CLI client to protect the code, Jscrambler is compatible with [other clients](https://docs.jscrambler.com/code-integrity/documentation/api/clients), such as Grunt, Gulp, webpack, Ember, and Metro (React Native).\n\nAnd that's all there is to it! You can configure your `deploy` stage as usual, which should access the contents of the `build/` folder and ensure your protected files are available in a live production environment.
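\n\nAs a rough sketch, a minimal deploy job might look like the following (this assumes you uncomment the `deploy` entry in the `stages` list above; the `./deploy.sh` script is a placeholder for your actual deployment step):\n\n```yml\ndeploy:production:\n stage: deploy\n dependencies:\n   # Pick up the protected build/ artifact from the protect stage\n   - build:production:obfuscated\n script:\n   # Placeholder command - ship the protected files in build/ to your hosting target\n   - ./deploy.sh build/\n```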
\n\n### Checking the protection result\n\nAs a final (optional) step, you might want to check the live app and see what its source code looks like. You can do that easily by using a browser debugger and opening the files from the \"Sources\" tab. The protected code should look completely unintelligible, similar to the one shown below.\n\n![Source code protected by Jscrambler](https://i.imgur.com/HXLZyFh.png)\nExample of obfuscated source code protected by Jscrambler.\n{: .note.text-center}\n\nJust bear in mind that, if you are using Jscrambler's anti-debugging transformations, your browser debugger will likely crash or derail the app execution. This is intended behavior and is very useful for preventing reverse engineering of the code.\n\n## Final thoughts\n\nAs we saw in this tutorial, setting up this integration between Jscrambler and GitLab is very straightforward. It introduces a new `protect` stage where the JavaScript source code is protected by Jscrambler before deployment.\n\nJscrambler goes well beyond JavaScript obfuscation since it provides runtime protection techniques such as [self defending](https://docs.jscrambler.com/code-integrity/documentation/transformations/self-defending) and [self healing](https://docs.jscrambler.com/code-integrity/documentation/transformations/self-healing), which provide anti-tampering and anti-debugging capabilities, as well as [code locks](https://docs.jscrambler.com/code-integrity/documentation/client-side-countermeasures). For more details about Jscrambler transformations, review [Jscrambler's documentation page](https://docs.jscrambler.com/).\n\n## Watch the demo\n\nMore of a video person? Watch the demo on how to protect your source code using GitLab and Jscrambler.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/aBx2Vtbe-1w\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[894,1307,915],{"slug":3897,"featured":6,"template":678},"how-to-protect-your-source-code-with-gitlab-and-jscrambler","content:en-us:blog:how-to-protect-your-source-code-with-gitlab-and-jscrambler.yml","How To Protect Your Source Code With Gitlab And Jscrambler","en-us/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler.yml","en-us/blog/how-to-protect-your-source-code-with-gitlab-and-jscrambler",{"_path":3903,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3904,"content":3909,"config":3915,"_id":3917,"_type":16,"title":3918,"_source":17,"_file":3919,"_stem":3920,"_extension":20},"/en-us/blog/tuto-mac-m1-gitlab-ci",{"title":3905,"description":3906,"ogTitle":3905,"ogDescription":3906,"noIndex":6,"ogImage":2478,"ogUrl":3907,"ogSiteName":692,"ogType":693,"canonicalUrls":3907,"schema":3908},"How to use Scaleway to self-host your GitLab Runners","Learn how to set up GitLab CI for your iOS and macOS projects using a hosted Mac mini M1.","https://about.gitlab.com/blog/tuto-mac-m1-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Scaleway to self-host your GitLab Runners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Benedikt Rollik\"}],\n        \"datePublished\": \"2021-06-07\",\n      }",{"title":3905,"description":3906,"authors":3910,"heroImage":2478,"date":3912,"body":3913,"category":14,"tags":3914},[3911],"Benedikt Rollik","2021-06-07","\nGitLab's complete DevOps platform comes with built-in continuous integration (CI) and continuous delivery (CD) via [GitLab CI/CD](https://docs.gitlab.com/ee/ci/). GitLab CI/CD is a great solution to increase developer productivity and motivation to write higher-quality code without sacrificing speed.\n
It runs a series of tests every time a commit is pushed, providing immediate visibility into the results of changes in the codebase. While it is not a hassle to set up CI using Linux-based machines, iOS and macOS developers may find it more complicated to have access to a Mac that is connected and available 24 hours a day.\n\nGitLab Runners, provided on GitLab.com, are the engine that executes CI workflows. Due to various requirements, some users may opt to self-host runners on public cloud VMs. This is super easy if the build VM OS requirement is Linux-based, since there are several low-cost public cloud Linux-based VM solutions. However, iOS and macOS developers may find fewer options for public cloud-delivered macOS-based systems.\n\nIn this tutorial, you will learn how to set up CI for iOS and macOS application development using a Scaleway Virtual Instance running the [GitLab application](https://www.scaleway.com/en/docs/install-gitlab-with-dbaas/) and a GitLab Runner that runs on a Scaleway-hosted [Mac mini M1](https://www.scaleway.com/en/hello-m1/). To follow along successfully, you should have some experience creating Xcode and GitLab projects, as well as some experience using a terminal and git.\n\n> **Requirements**\n>\n- You have an account and are logged into [console.scaleway.com](https://console.scaleway.com)\n- You have [configured your SSH Key](https://www.scaleway.com/en/docs/configure-new-ssh-key/)\n- You have a Virtual Instance running the GitLab InstantApp\n- **Note:** We assume you have already deployed a Virtual Instance running the GitLab InstantApp. If not, [deploy GitLab](https://www.scaleway.com/en/docs/install-gitlab-with-dbaas/) before continuing with this tutorial.\n\n### Deploying the Mac mini M1\n\n1. Log into your [Scaleway console](https://console.scaleway.com) and click on **Apple silicon** in the **Compute** section of the sidebar.\n\n   ![Orga_dashboard](https://about.gitlab.com/images/blogimages/scaleway-blog/Orga_dashboard.png){: .shadow.medium}\n   Click on \"Apple silicon\" in the Scaleway console.\n   {: .note.text-center}\n\n1. The Apple silicon M1 as-a-Service splash screen displays. Click **Create a Mac mini M1**.\n1. Enter the details for your Mac mini M1:\n\n   - Select the geographic region in which your Mac mini M1 will be deployed.\n   - Choose the macOS version you want to run on the Mac mini M1.\n   - Select the hardware configuration for your Mac mini M1.\n   - Enter a name for your Mac mini M1.\n\n1. Click **Create a Mac mini M1** to launch the installation of your Apple silicon M1 as-a-Service.\n\n   ![M1_creation](https://about.gitlab.com/images/blogimages/scaleway-blog/M1_creation.png){: .shadow.medium}\n   Click \"Create a Mac mini M1\" to launch.\n   {: .note.text-center}\n\n1. Once deployed, click **VNC** from the Mac mini M1 Overview page to launch the remote desktop client.\n\n1. Launch the **App Store** and install the **Xcode development environment** on your Mac mini M1.\n\n### Setting up the Homebrew package manager\n\n[Homebrew](https://brew.sh/) is a package manager for macOS. It can be used to manage the software installed on your Mac. We use it to install `gitlab-runner` on your Mac mini M1.\n\n1. Click on the Terminal icon to open a new **Terminal**.\n\n
1. Copy-paste the following code in the terminal application and press **Enter** to install Homebrew and the Xcode command line tools:\n\n   ```sh\n   /bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\"\n   ```\n\nLeave the terminal window open since it is required for the next step.\n\n### Installing the GitLab Runner\n\nGitLab Runner is an application, installed on a different computer than your GitLab host, that runs the jobs in a pipeline. It executes the build task on your Mac mini M1 for the code you push to your GitLab instance.\n\n1. Make sure you are still in the **Terminal** application. If you closed it after installing Homebrew, open a new one.\n\n1. Run the following command to install `gitlab-runner`:\n\n   ```\n   brew install gitlab-runner\n   ```\n\n### Configuring the Runner in GitLab\n\n   > **Note:** You require a Virtual Instance running the [GitLab InstantApp](https://www.scaleway.com/en/docs/how-to-use-the-gitlab-instant-apps/) for the following steps.\n\n1. GitLab Runner requires a registration token to link your GitLab instance and the Runner. Open the GitLab web interface of your Virtual Instance and log into it.\n\n1. Select the project you want to use in GitLab with the Runner. If you don't have a project yet, click **+** > **Create Project** and fill in the required information about the project.\n\n1. On the projects overview page, click **Settings** > **CI/CD** to view the Continuous Integration settings.\n\n1. On the Continuous Integration settings page, click **Expand** in the **Runner** section to view the required information to link GitLab with your Runner.\n\n1. Scroll down to retrieve the GitLab Instance URL and the registration token.\n\n1. Run the following command in the Terminal application on your Mac to launch the configuration wizard for your GitLab Runner:\n\n   ```\n   gitlab-runner register\n   ```\n\n   Enter the required information as follows:\n\n   ```\n   Runtime platform                                    arch=arm64 os=darwin pid=810 revision=2ebc4dc4 version=13.9.0\n   WARNING: Running in user-mode.\n   WARNING: Use sudo for system-mode:\n   WARNING: $ sudo gitlab-runner...\n\n   Enter the GitLab instance URL (for example, https://gitlab.com/):\n   http://163.172.141.212/   \u003C- Enter the URL of your GitLab instance\n   Enter the registration token:\n   1mWBwzWAZSL7-pR18K3Y  \u003C- Enter the registration token for your Runner\n   Enter a description for the runner:\n   [306a20a2-2e01-4f2e-bc76-a004d35d9962]: Mac mini M1  \u003C- Enter a description for your Runner\n   Enter tags for the runner (comma-separated):\n   Mac, mini, M1, dev, xcode  \u003C- Optionally, enter tags for the runner\n   Registering runner... succeeded                     runner=1mWBwzWA\n   Enter an executor: shell, virtualbox, docker+machine, custom, docker, docker-ssh, kubernetes, parallels, ssh, docker-ssh+machine:\n   shell  \u003C- Enter the \"shell\" executor for the runner\n   Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\n   ```\n\n1. Reload the CI/CD configuration page of your GitLab instance. The runner is now linked to your project and displays as available.\n\n   > **Note:** If you have several projects in a GitLab group, you can configure the Runner at the group level.\n
Runners available at the group level are available for all projects within said group.\n\n### Configuring CI for your project\n\nGitLab stores the CI configuration in a file called `.gitlab-ci.yml`. This file should be in the folder you created for your project. Typically this is the same directory where your Xcode project file (`ProjectName.xcodeproj`) is located. The GitLab CI configuration file is written in [YAML](https://yaml.org/).\n\nInside the configuration file you can specify information like:\n\n* The scripts you want to run.\n* Other configuration files and templates you want to include.\n* Dependencies and caches.\n* The commands you want to run in sequence and those you want to run in parallel.\n* The location to deploy your application to.\n* Whether you want to run the scripts automatically or trigger any of them manually.\n\n1. Open a text editor on your local computer and create the `.gitlab-ci.yml` file as in the following example.\n\n   ```\n   stages:\n     - build\n     - test\n\n   build-code-job:\n     stage: build\n     script:\n       - echo \"Check the ruby version, then build some Ruby project files:\"\n       - ruby -v\n       - rake\n\n   test-code-job1:\n     stage: test\n     script:\n       - echo \"If the files are built successfully, test some files with one command:\"\n       - rake test1\n   ```\n\n1. Save the file and make a new commit to add it to your repository.\n\n1. Push the commit to GitLab. The CI will automatically launch the tasks on your Runner.\n\nFor more information on the GitLab CI configuration file, refer to the [official documentation](https://docs.gitlab.com/ee/ci/yaml/gitlab_ci_yaml.html).
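\n\nIf you registered the Runner with tags, you can also route jobs specifically to your Mac mini M1 by adding the `tags` keyword to a job. Here is a small sketch (the tag must match one entered during registration, such as xcode above, and the scheme name MyApp is a placeholder for your own):\n\n```yml\nbuild-ios-job:\n  stage: build\n  tags:\n    # Only runners registered with the xcode tag pick up this job\n    - xcode\n  script:\n    - xcodebuild -scheme \"MyApp\" build\n```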
\n\n### Speed up development with Scaleway and GitLab\n\nHaving a dedicated Mac available for executing your CI jobs can reduce your development team's cycle time. In this tutorial, we covered configuring a dedicated Mac mini M1 to host a GitLab Runner. If you want to learn more about the Mac mini M1 as-a-Service, refer to our [product documentation](https://www.scaleway.com/en/docs/apple-silicon-as-a-service-quickstart/).\nWe invite the GitLab community to start building on Scaleway today with a €10 voucher to use on dozens of products & services. Find out more [here](https://www.scaleway.com/en/gitlab-m1/).\n\n\u003Chr>\n\n_Mac mini, macOS are trademarks of Apple Inc., registered in the U.S. and other countries and regions. IOS is a trademark or registered trademark of Cisco in the U.S. and other countries and is used by Apple under license. Scaleway is not affiliated with Apple Inc._\n",[702,771,771],{"slug":3916,"featured":6,"template":678},"tuto-mac-m1-gitlab-ci","content:en-us:blog:tuto-mac-m1-gitlab-ci.yml","Tuto Mac M1 Gitlab Ci","en-us/blog/tuto-mac-m1-gitlab-ci.yml","en-us/blog/tuto-mac-m1-gitlab-ci",{"_path":3922,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3923,"content":3929,"config":3935,"_id":3937,"_type":16,"title":3938,"_source":17,"_file":3939,"_stem":3940,"_extension":20},"/en-us/blog/managing-global-projects-requiring-rapid-response-continuously",{"title":3924,"description":3925,"ogTitle":3924,"ogDescription":3925,"noIndex":6,"ogImage":3926,"ogUrl":3927,"ogSiteName":692,"ogType":693,"canonicalUrls":3927,"schema":3928},"How to leverage distributed engineering teams for rapid response","Rapid response issues can be handled in a compressed time frame if distributed engineering teams can work continuously. Here's what we've learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681331/Blog/Hero%20Images/all-remote-world-banner-1920x1080.png","https://about.gitlab.com/blog/managing-global-projects-requiring-rapid-response-continuously","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage distributed engineering teams for rapid response\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Baus\"}],\n        \"datePublished\": \"2021-06-04\",\n      }",{"title":3924,"description":3925,"authors":3930,"heroImage":3926,"date":3932,"body":3933,"category":14,"tags":3934},[3931],"Chris Baus","2021-06-04","\n\nI am an [Engineering Manager](https://gitlab.com/chris_baus) working on a distributed engineering team at GitLab. [Our team](/handbook/engineering/development/fulfillment/purchase/) is distributed globally, and we have engineers working in India, Germany, Australia, New Zealand, and the United States. I am [located](https://www.google.com/maps/place/Stateline,+NV/) in the U.S. in Pacific Standard Time (PST). In coordination with [other](/handbook/engineering/development/ops/verify/#verifycontinuous-integration) globally distributed engineering teams, we recently responded to an [abuse issue](/blog/prevent-crypto-mining-abuse/) which was causing disruptions for legitimate GitLab.com users and required a [rapid response](/handbook/engineering/workflow/#rapid-engineering-response).\n\n## Global distribution as an advantage\n\nMany managers view global team distribution as a constraint (because synchronous communication becomes more difficult), but it is possible to [embrace the constraint](https://basecamp.com/gettingreal/03.4-embrace-constraints) and turn it into an advantage. When teams are globally distributed, work can continue around the clock, uninterrupted, decreasing the overall delivery time of projects. I refer to this as \"continuous development.\"\n\nWhile we don't typically work this way, when problems are pressing, working continuously can be a strategy to advance the delivery time frame. In this case, two engineers from our team worked on the problem [17](https://www.google.com/maps/place/Bellingham,+WA/) [hours](https://www.google.com/maps/place/Melbourne+VIC,+Australia/) apart. This provided some overlap in the afternoon (PST), but for the most part, the engineers were working on the project at different times, which allowed work to progress continuously.\n\nIt requires some extra management compared to the typical workflow, but the effort may be worth the investment if time is critical.\n\n## Define clear handoffs\n\nOne risk of multiple engineers working continuously and [asynchronously](https://baus.net/embrace-asynchronous-work/) is duplicated work resulting from an unclear separation of work or unclear handoffs. If possible, it is best to separate work so engineers are working in different areas of code, but separating work might not always be feasible or practical. In either case, when an engineer finishes working for the day, they should provide an update describing the work which was completed, any problems impeding progress, and what is left to be done.\n\nIf engineers are working in the same area of code, it should be clearly defined whether they are working in the same branch or separate branches.\n
If they are working in the same branch, it might make sense for one engineer to maintain the branch and accept merges from other engineers before it is merged into the main development branch.\n\n## Agree on interfaces\n\nWhen distributed engineering teams are working on a project, it is critical to define clear and documented interfaces between systems and components. System interfaces should be documented in a centrally maintained location. If there is a need to change the interface, then everyone affected by the change should be notified.\n\nIn retrospect, we lost nearly a day of testing because of confusion about an interface between the frontend and backend of the system. These types of problems tend to be amplified when not all engineers involved in the project are available at the same time, as it may take an entire 24-hour cycle to handle and communicate changes. When a discrepancy is found, the problem should be documented by the engineers currently working and, if possible, a solution proposed.\n\n## Place synchronous communication on management\n\nWhen working concurrently, to help ensure all teams are on the same path, it can be helpful to discuss the project status synchronously. This can be difficult to arrange with distributed engineering teams. On this project, the technical teams met twice weekly for 15-30 minutes. It can be tempting to require team members to work off hours to attend synchronous meetings. I'd recommend fighting this tendency.\n\nIt's the responsibility of a manager to ensure effective communication across teams. During rapid-response actions, it's helpful to keep flexible working hours to synchronize with team members across different time zones. I accept working outside my typical hours (knowing I can [adjust my hours](/company/culture/all-remote/non-linear-workday/) at other times of the day) to communicate the status of my team synchronously. This also requires the manager to have a more detailed technical understanding of the implementation and status than is normally required, so they can speak on behalf of offline team members.\n\nInstead of requiring synchronous meeting attendance, [take good notes](/company/culture/all-remote/meetings/#document-everything-live-yes-everything) and [record the meeting](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A) so team members in other time zones can review the status and decisions from synchronous meetings.\n\n## Trade-offs\n\nIn many ways, engineering is the art of balancing trade-offs. Operating in a continuous, globally-distributed fashion takes more management and cognitive overhead than typical asynchronous workflows, but when time is a priority, it could decrease the release time on critical projects.\n\nOperating continuously may come at the cost of other management tasks, as compressing time increases the effort required to oversee the project requiring a [rapid response](/handbook/engineering/workflow/#rapid-engineering-response). At the end of the rapid-response issue, a retrospective should be held to determine if the engineering strategy provided the expected results, relative to the increased overhead.\n
My recommendation is to be realistic about costs when planning continuous development, even when it provides short-term results.\n\n_Read more on [leading engineering teams](/blog/cadence-is-everything-10x-engineering-organizations-for-10x-engineers/)._\n",[2409,915,1646,1347],{"slug":3936,"featured":6,"template":678},"managing-global-projects-requiring-rapid-response-continuously","content:en-us:blog:managing-global-projects-requiring-rapid-response-continuously.yml","Managing Global Projects Requiring Rapid Response Continuously","en-us/blog/managing-global-projects-requiring-rapid-response-continuously.yml","en-us/blog/managing-global-projects-requiring-rapid-response-continuously",{"_path":3942,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3943,"content":3949,"config":3955,"_id":3957,"_type":16,"title":3958,"_source":17,"_file":3959,"_stem":3960,"_extension":20},"/en-us/blog/advanced-search-data-migrations",{"title":3944,"description":3945,"ogTitle":3944,"ogDescription":3945,"noIndex":6,"ogImage":3946,"ogUrl":3947,"ogSiteName":692,"ogType":693,"canonicalUrls":3947,"schema":3948},"GitLab's data migration process for Advanced Search","We needed a more streamlined data migration process for Advanced search. Here's what we did.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682017/Blog/Hero%20Images/advanced-search-migrations.jpg","https://about.gitlab.com/blog/advanced-search-data-migrations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's data migration process for Advanced Search\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dmitry Gruzd\"}],\n        \"datePublished\": \"2021-06-01\",\n      }",{"title":3944,"description":3945,"authors":3950,"heroImage":3946,"date":3952,"body":3953,"category":14,"tags":3954},[3951],"Dmitry Gruzd","2021-06-01","\n\nFor some time now, GitLab has been working on enabling the Elasticsearch\nintegration on GitLab.com to allow as many GitLab.com users as possible access\nto the [Advanced Global Search](https://docs.gitlab.com/ee/user/search/advanced_search.html)\nfeatures. Last year, after enabling Advanced Search for all licensed customers on\nGitLab.com, we started thinking about how to simplify the rollout of some Advanced Search\nfeatures that require changing the data in Elasticsearch.\n\n(If you're interested in the lessons we learned on our road to enabling\nElasticsearch for GitLab.com, you can read [all about it](/blog/elasticsearch-update/).)\n\n## The data migration process problem\n\nSometimes we need to change mappings of an index or backfill a field, and\nreindexing everything from scratch or using [Zero downtime reindexing](https://docs.gitlab.com/ee/integration/elasticsearch.html#zero-downtime-reindexing)\nmight seem like an obvious solution. However, this is not a scalable option for\nbig GitLab instances. GitLab.com is the largest known installation of GitLab and\nas such has a lot of projects, code, issues, merge requests and other things that\nneed to be indexed. For example, at the moment our Elasticsearch cluster has\nalmost 1 billion documents in it.\n
It would take many weeks or even months to\nreindex everything, and for all that time indexing would need to remain paused, so\nsearch results would quickly become outdated.\n\n## Original plan for multi-version support\n\nOriginally, we were planning to introduce multi-version support using an approach\nthat is fully reliant on GitLab to manage both indices, reading from the old one\nand writing to both until the migration is finished. You can read more information at\n[!18254](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/18254) and\n[&1769](https://gitlab.com/groups/gitlab-org/-/epics/1769). As of writing this,\nmost of the code for this approach still exists in GitLab in a half-implemented form.\n\nThere were two primary concerns with this approach:\n\n1. Reindexing would require the GitLab application to read every single document\nfrom the storage and send it to Elasticsearch again. Doing so\nwould put a big strain on different parts of the application, such as the database,\nGitaly, and Sidekiq.\n1. Reindexing everything from GitLab to the cluster again may be very wasteful on\noccasions where you only need to change a small part of the index. For example, if\nwe want to add epics to the index, it is very wasteful to reindex every document\nin the index when we could very quickly just index all the epics. There are many\nsituations where we will be trying to perform some migration that can be done more\nefficiently using a targeted approach (e.g. adding a new field to a document type\nonly requires reindexing all the documents that actually have that field).\n\nFor these reasons, we decided to create a different data migration process.\n\n## Our revised data migration process\n\nWe took inspiration from [Rails DB migrations](https://guides.rubyonrails.org/active_record_migrations.html).\nWe wanted to apply the best practices from them without having to re-architect what\nthe Rails team has already implemented.\n\nFor example, we decided that we would have a special directory with time-stamped\nmigration files. We wanted to achieve a strict execution order so that many\nmigrations could be shipped simultaneously. A special background processing worker\nchecks this directory on a schedule. This is slightly different from Rails background migrations, where the operator is required to run the migration manually. We decided to make the process fully automated and run it in the background to avoid the need for self-managed customers to add extra steps to the migration process. Extra manual steps would likely have made things much more difficult for everyone involved, as there are many ways to run GitLab. This extra constraint also forces us to always think of migrations as possibly incomplete at any point in the code, which is essential for zero-downtime upgrades.\n\nAt first, we wanted to store the migration state in the PostgreSQL database, but\ndecided against it since that may not work well for the situation where a user\nwants to connect a new Elasticsearch cluster to GitLab. It's better to store the\nmigration state in the Elasticsearch cluster itself so it's more likely to be in\nsync with the data.\n\nYou can see your new migration index in your Elasticsearch cluster. It's called\n`gitlab-production-migrations`. GitLab stores a few fields there. We use the\nversion number as the document id. 
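\n\nFor illustration, each migration is a time-stamped Ruby file; at the time of writing, these files live under `ee/elastic/migrate/` in the GitLab repository (the file name below is hypothetical), and the timestamp doubles as the document id in the migrations index:\n\n```\nee/elastic/migrate/20210510143200_example_migration.rb\n```\n\n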
This is an example document:\n\n```\n{\n    \"_id\": \"20210510143200\",\n    \"_source\": {\n        \"completed\": true,\n        \"state\": {\n        },\n        \"started_at\": \"2021-05-12T07:19:08.884Z\",\n        \"completed_at\": \"2021-05-12T07:19:08.884Z\"\n    }\n}\n```\n\nThe `state` field is used to store data that's required to run batched migrations.\nFor example, for batched migrations we store a slice number and a task id for the\ncurrent Elasticsearch reindex operation, and we update the state after every run.\n\nThis is how an example migration looks:\n\n```ruby\nclass MigrationName \u003C Elastic::Migration\n  def migrate\n    # Migrate the data here\n  end\n\n  def completed?\n    # Return true if completed, otherwise return false\n  end\nend\n```\n\nThis looks a lot like [Rails DB migrations](https://guides.rubyonrails.org/active_record_migrations.html),\nwhich was our goal from the beginning. The main difference is that it has an additional method to\ncheck if a migration is completed. We've added that method because we often need to\nexecute asynchronous tasks and want to check later, in a different worker process, whether they have completed.\n\n## Migration framework logic\n\nThis is a simple flow chart to demonstrate the high-level logic of the new migration framework.\n\n```mermaid\ngraph TD\n    CRON(cron every 30 minutes) --> |executes| WORKER[MigrationWorker]\n    WORKER --> B(an uncompleted migration is found)\n    B --> HALT(it's halted)\n    B --> UN(it's uncompleted)\n    B --> COMP(it's finished)\n    HALT --> WARN(show warning in the admin UI)\n    WARN --> EX(exit)\n    UN --> PREF(migration preflight checks)\n    PREF --> RUN(execute the migration code)\n    COMP --> MARK(mark it as finished)\n    MARK --> EX\n```\n\nAs you can see above, there are multiple different states of a migration. For example,\nthe framework allows a migration to be halted when it has too many failed attempts. In\nthat case, a warning is shown in the admin UI with a button for restarting\nthe migration.\n\n![How the warning looks](https://about.gitlab.com/images/blogimages/advanced_search/halted_warning.png)\n\n## Configuration options\n\nWe've introduced many useful configuration options into the framework, such as:\n\n- `batched!` - Allows the migration to run in batches. If set, the worker will\nre-enqueue itself with a delay, which is set using the `throttle_delay` option\ndescribed below. We use this option to reduce the load and ensure that the\nmigration won't time out.\n\n- `throttle_delay` - Sets the wait time in between batch runs. This time should be\nset high enough to allow each migration batch enough time to finish.\n\n- `pause_indexing!` - Pauses indexing while the migration runs. This setting will\nrecord the indexing setting before the migration runs and set it back to that\nvalue when the migration is completed. GitLab only uses this option when\nabsolutely necessary since we attempt to minimize downtime as much as possible.\n\n- `space_requirements!` - Verifies that enough free space is available in the\ncluster when the migration is running. This setting will halt the migration if the\nstorage required is not available. 
This option is used to\nprevent situations where your cluster runs out of space while attempting to execute\na migration.\n\nYou can see the up-to-date list of options in this development [documentation section](https://docs.gitlab.com/ee/development/elasticsearch.html#migration-options-supported-by-the-elasticmigrationworker).\n\n## Data migration process results\n\nWe implemented the Advanced Search migration framework in the 13.6 release and\nhave been improving it since. You can see some details in the original issue\n[#234046](https://gitlab.com/gitlab-org/gitlab/-/issues/234046). The only\nrequirement for this new feature is that your index was created using at\nleast version 13.0. We have that requirement since we're heavily using\naliases, which were introduced in 13.0. As you might know, over the last few\nreleases we've been working on separating different document types into their own\nindices. This migration framework has been a tremendous help for our initiative.\nWe've already completed the migration of issues (in 13.8), comments (in 13.11),\nand merge requests (in 13.12) with a noticeable performance improvement.\n\nSince we've accumulated so many different migrations over the last few releases\nand they require us to support multiple code paths for a long period of time,\nwe've decided to remove older migrations that were added prior to the 13.12\nrelease. You can see some details in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/329952).\nWe plan to continue the same strategy in the future, which is one of the reasons\nwhy you should always upgrade to the latest minor version before migrating to a\nmajor release.\n\nIf you're interested in contributing to features that require Advanced Search\nmigrations, we have a dedicated [documentation section](https://docs.gitlab.com/ee/development/elasticsearch.html#creating-a-new-advanced-search-migration)\nthat explains how to create one and lists all available options for it.\n",[749,2331,727],{"slug":3956,"featured":6,"template":678},"advanced-search-data-migrations","content:en-us:blog:advanced-search-data-migrations.yml","Advanced Search Data Migrations","en-us/blog/advanced-search-data-migrations.yml","en-us/blog/advanced-search-data-migrations",{"_path":3962,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3963,"content":3969,"config":3975,"_id":3977,"_type":16,"title":3978,"_source":17,"_file":3979,"_stem":3980,"_extension":20},"/en-us/blog/gitlab-and-jira-integration-the-final-steps",{"title":3964,"description":3965,"ogTitle":3964,"ogDescription":3965,"noIndex":6,"ogImage":3966,"ogUrl":3967,"ogSiteName":692,"ogType":693,"canonicalUrls":3967,"schema":3968},"GitLab and Jira integration: the final steps","The last of our three-part series on GitLab and Jira integrations offers a step-by-step look at how the tools work together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679490/Blog/Hero%20Images/jira-importer-blog-post.png","https://about.gitlab.com/blog/gitlab-and-jira-integration-the-final-steps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Jira integration: the final steps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-05-24\",\n      }",{"title":3964,"description":3965,"authors":3970,"heroImage":3966,"date":3972,"body":3973,"category":14,"tags":3974},[3971],"Tye Davis","2021-05-24","\n_This is the third in our three-part 
series on GitLab and Jira integrations. [Part one](/blog/integrating-gitlab-com-with-atlassian-jira-cloud/) explained how to integrate GitLab.com with Jira Cloud. [Part two](/blog/gitlab-jira-integration-selfmanaged/) walked through a detailed explanation of integrating GitLab self-managed with Jira._\n\nAfter the integration is set up on GitLab and Jira, you can:\n\n* Refer to any Jira issue by its ID in GitLab branch names, commit messages, and merge request titles.\n\n* Use commit messages in GitLab to move Jira issues along that Jira project's defined transitions. Here you can see that this Jira issue has Backlog, Selected for Development, In Progress, and Done.\n\n![Issue View in Jira](https://about.gitlab.com/images/blogimages/atlassianjira/issueview.png){: .shadow.medium.center}\nIssue View in Jira\n{: .note.text-center}\n\n* As referenced in the base GitLab-Jira integration, commenting in a merge request or commit that references an issue, e.g., PROJECT-7, adds a corresponding comment to the Jira issue. In addition, commenting with a Jira transition (putting a “#” first) initiates the movement of the Jira issue to the desired transition. The example below uses the built-in GitLab Web IDE (this can be done in your Web IDE of choice as well).\n\n![Comment in a Commit/MR](https://about.gitlab.com/images/blogimages/atlassianjira/commitcomment.png){: .shadow.medium.center}\nComment in a Commit/MR\n{: .note.text-center}\n\n* Currently, the Jira-GitLab Dev Panel integration via DVCS refreshes on a 60-minute schedule. To expedite an update, manually refresh the specific project with your most recent changes.\n\n![Dev Panel refreshes every 60 minutes](https://about.gitlab.com/images/blogimages/atlassianjira/devpanelrefresh.png){: .shadow.medium.center}\nDev Panel refreshes every 60 minutes\n{: .note.text-center}\n\n* See the linked branches, commits, and merge requests in Jira issues (merge requests are called “pull requests” in Jira issues).\nJira issue IDs must be formatted in uppercase for the integration to work.\n\n![See GitLab linked in the Dev Panel](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabdevpanel.png){: .shadow.medium.center}\nSee GitLab linked in the Dev Panel\n{: .note.text-center}\n\n* Click the links to see your GitLab repository data.\n\n![Click into the commits](https://about.gitlab.com/images/blogimages/atlassianjira/clickintocommit.png){: .shadow.medium.center}\nClick into the commits\n{: .note.text-center}\n\n![See GitLab linked in the Dev Panel](https://about.gitlab.com/images/blogimages/atlassianjira/clickintopr.png){: .shadow.medium.center}\nClick into the merge (pull) requests\n{: .note.text-center}\n\nFor more information on using Jira Smart Commits to track time against an issue, specify an issue transition, or add a custom comment, see the Atlassian page [Using Smart Commits](https://confluence.atlassian.com/fisheye/using-smart-commits-960155400.html).\n\n## View Jira Issues within GitLab\n\nYou can browse and search issues from a selected Jira project directly in GitLab. 
This requires configuration in GitLab by an administrator.\n\n* In the GitLab integration setup for Jira, click \"Enable Jira issues.\"\n\n![Enable Jira issues in GitLab](https://about.gitlab.com/images/blogimages/atlassianjira/enablejiraissues.png){: .shadow.medium.center}\nEnable Jira issues in GitLab\n{: .note.text-center}\n\n* Locate your project key in Jira.\n\n![Locate your project key in Jira](https://about.gitlab.com/images/blogimages/atlassianjira/locateprojectkey.png){: .shadow.medium.center}\nLocate your project key in Jira\n{: .note.text-center}\n\n* Add your project key into the GitLab integration setup for Jira.\n\n![Add your project key to GitLab](https://about.gitlab.com/images/blogimages/atlassianjira/addprojectkey.png){: .shadow.medium.center}\nAdd your project key to GitLab\n{: .note.text-center}\n\n* Select \"Jira Issues\", then \"Issue List\" from the left panel in GitLab.\n\n![Select Jira Issues on left panel](https://about.gitlab.com/images/blogimages/atlassianjira/selectjiraissues.png){: .shadow.medium.center}\nSelect Jira Issues\n{: .note.text-center}\n\nFrom the Jira Issues menu, click Issues List. The issue list defaults to sorting by Created date, with the newest issues listed at the top. You can change this to Last updated.\nIssues are grouped into tabs based on their [Jira status](https://confluence.atlassian.com/adminjiraserver070/defining-status-field-values-749382903.html).\n\n* The Open tab displays all issues with a Jira status in any category other than Done.\n* The Closed tab displays all issues with a Jira status categorized as Done.\n* The All tab displays all issues of any status.\n\nClick an issue title to open its original Jira issue page for full details.\n\n![View Jira issues in GitLab](https://about.gitlab.com/images/blogimages/atlassianjira/viewjiraissues.png){: .shadow.medium.center}\nView Jira issues in GitLab\n{: .note.text-center}\n\n### Search and filter the issues list\n\nTo refine the list of issues, use the search bar to search for any text contained in an issue summary (title) or description.\nYou can also filter by labels, status, reporter, and assignee using URL parameters. Enhancements to be able to use these through the user interface are [planned](https://gitlab.com/groups/gitlab-org/-/epics/3622).\n\n* To filter issues by labels, specify one or more labels as part of the `labels[]` parameter in the URL. When using multiple labels, only issues that contain all specified labels are listed: `/-/integrations/jira/issues?labels[]=backend&labels[]=feature&labels[]=QA`\n* To filter issues by status, specify the `status` parameter in the URL: `/-/integrations/jira/issues?status=In Progress`\n* To filter issues by reporter, specify a reporter’s Jira display name for the `author_username` parameter in the URL: `/-/integrations/jira/issues?author_username=John Smith`\n* To filter issues by assignee, specify their Jira display name for the `assignee_username` parameter in the URL: `/-/integrations/jira/issues?assignee_username=John Smith`\n\n## Troubleshooting\n\nIf these features do not work as expected, it is likely due to a problem with the way the integration settings were configured.\n\n### GitLab is unable to comment on a Jira issue\n\nMake sure that the Jira user you set up for the integration has the correct access permission to post comments on a Jira issue, and to transition the issue if you’d like GitLab to be able to do that as well. 
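\n\nA quick way to verify both permissions at once is to push a commit whose message comments on and transitions an issue in one go. The issue key and transition below are hypothetical, and the syntax follows Atlassian's Smart Commits documentation linked earlier:\n\n```txt\ngit commit -m \"PROJECT-7 #comment Verifying the integration user's permissions #close\"\n```\n\nIf the comment shows up in Jira but the transition does not happen, the integration user's permissions or the Transition ID (covered below) are the likely culprits.\n\n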
Jira issue references and update comments will not work if the GitLab issue tracker is disabled.\n\n### GitLab is unable to close a Jira issue\n\nMake sure the Transition ID you set within the Jira settings matches the one your project needs to close an issue.\nMake sure that the Jira issue is not already marked as resolved; that is, the Jira issue resolution field is not set. (It should not be struck through in Jira lists.)\n\n## Conclusion\n\nGitLab helps teams ship software faster with technology integration options, such as the integration with Jira, that automate tasks and provide visibility into development progress and the greater end-to-end software lifecycle. We recognize that many companies use Jira for Agile project management, and our seamless integration brings Jira together with GitLab.\n\n## Watch and learn\n\nMore of a video person? For a walkthrough of the integration with GitLab for Jira, watch and learn how to configure the GitLab Jira integration using the Marketplace app.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/fWvwkx5_00E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n",[1347,232,894],{"slug":3976,"featured":6,"template":678},"gitlab-and-jira-integration-the-final-steps","content:en-us:blog:gitlab-and-jira-integration-the-final-steps.yml","Gitlab And Jira Integration The Final Steps","en-us/blog/gitlab-and-jira-integration-the-final-steps.yml","en-us/blog/gitlab-and-jira-integration-the-final-steps",{"_path":3982,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3983,"content":3989,"config":3994,"_id":3996,"_type":16,"title":3997,"_source":17,"_file":3998,"_stem":3999,"_extension":20},"/en-us/blog/dag-manual-fix",{"title":3984,"description":3985,"ogTitle":3984,"ogDescription":3985,"noIndex":6,"ogImage":3986,"ogUrl":3987,"ogSiteName":692,"ogType":693,"canonicalUrls":3987,"schema":3988},"How to use manual jobs with `needs:` relationships","Are you using manual jobs and needs relationships in your CI/CD pipeline? Learn more about the fix that might cause your pipeline to behave differently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683170/Blog/Hero%20Images/blog_cover2.png","https://about.gitlab.com/blog/dag-manual-fix","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use manual jobs with `needs:` relationships\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-05-20\",\n      }",{"title":3984,"description":3985,"authors":3990,"heroImage":3986,"date":3991,"body":3992,"category":14,"tags":3993},[1020],"2021-05-20","\n\n## A bug when a job `needs` a manual job\n\nIn [13.12 we fixed a bug](https://gitlab.com/gitlab-org/gitlab/-/issues/31264) that might affect the existing behavior of your pipeline. We explain why we had to fix the bug, the possible impact of this change on your pipeline, and the proposed workaround if you would like to revert this behavior.\n\n## Background on a two-job pipeline\n\nIn GitLab CI/CD, you can easily configure a job to require manual intervention before it runs. 
The job gets added to the pipeline, but doesn't run until you click the **play** button on it.\n\nLet's look at a two-job pipeline:\n\n```yaml\nstages:\n  - stage1\n  - stage2\n\njob1:\n  stage: stage1\n  script:\n    - echo \"this is an automatic job\"\n\nmanual_job:\n  stage: stage2\n  script:\n    - echo \"This is a manual job which doesn't start automatically, and the pipeline can complete without it starting.\"\n  when: manual # This setting turns a job into a manual one\n```\n\nThis is how it looks in the pipeline graph:\n\n![image2](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog1.png){: .shadow.medium.center.wrap-text}\n\nNotice that the manual job gets skipped, and the pipeline completes successfully even though the manual job did not get triggered. This happens because manual jobs are considered optional, and do not need to run.\n\nInternally, manual jobs have `allow_failure` set to true by default, which means that these skipped manual jobs do not cause a pipeline failure. The YAML code below demonstrates how to write the manual job, which results in the same behavior. The job doesn't automatically start, is skipped, and the pipeline passes.\n\n```yaml\nmanual_job:\n  stage: stage2\n  script:\n    - echo \"This is a manual job which doesn't start automatically, and the pipeline can complete without it starting.\"\n  when: manual\n  allow_failure: true # this line is redundant since manual jobs have this setting by default\n```\n\nYou can set `allow_failure` to true for any job, including both manual and automatic jobs, and then the pipeline does not care if the job runs successfully or not.\n\n### How to expand the configuration with `needs` (DAG)\n\nLast year we introduced the [`needs` keyword, which lets you create a Directed Acyclic Graph (DAG) to speed up your pipeline](https://docs.gitlab.com/ee/ci/yaml/#needs). The `needs` keyword creates a dependency between two jobs regardless of their stage.\n\nLet's look at this example:\n\n```yaml\nstages:\n  - stage1\n  ....\n  - stage10\n\njob1: # this is the first job that runs in the pipeline\n  stage: stage1\n  script:\n    - echo \"exit 0\"\n.....\n\njob10:\n  needs:  # Defined a \"needs\" relationship with job1\n    - job1\n  stage: stage10\n  script:\n    - echo \"This job runs as soon as job1 completes, even though this job is in stage10.\"\n```\n\nThe `needs` keyword creates a dependency between the two jobs, so `job10` runs as soon as `job1` **finishes running** successfully, regardless of the stage ordering.\n\nSo what happens if a job `needs` a manual job that doesn't start running automatically?\n\nLet's look at the following example:\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: exit 0\n\ntest:\n  stage: test\n  when: manual\n  script: exit 0\n\ndeploy:\n  stage: deploy\n  script: echo \"when should this job run?\"\n  needs:\n    - test\n```\n\nBefore 13.12, this type of configuration would cause the pipeline to get stuck. The `deploy` job can only start when the `test` job completes, but the `test` job does not start automatically. 
The rest of the pipeline stops and waits for someone to run the manual `test` job.\n\n![image3](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog2.png){: .shadow.medium.center.wrap-text}\n\nThis behavior is even worse with larger pipelines:\n\n![image4](https://about.gitlab.com/images/blogimages/11-05-2021-when-job-needs-manual/blog3.png){: .shadow.medium.center.wrap-text}\n\nThe example above shows a `needs` relationship between the `post test` job and the manual `test` job. As you can see, the pipeline is stuck in a running state and any subsequent jobs will not run.\n\nThis was not the behavior most users expected, so we improved it in 13.12. Now, if there is a `needs` relationship pointing to a manual job, the pipeline doesn't stop by default anymore. The manual job is considered optional by default in all cases now. Any jobs that have a `needs` relationship to manual jobs are now also considered optional and skipped if the manual job isn't triggered. If you start the manual job, the jobs that need it can start after it completes.\n\nNote that if you start the manual job before a later job that has it in a `needs` configuration, the later job will still wait for the manual job to finish running.\n\n## What if I don't want this new behavior?\n\nOne of the reasons we selected this solution is that you can quickly revert this change. If you made use of this inadvertent behavior and configured your pipelines to use it to block on manual jobs, it's easy to return to that previous behavior. All you have to do is override the default `allow_failure` in the manual job with `allow_failure: false`. This way, the manual job is no longer optional, and the pipeline status will be marked as blocked and wait for you to run the job manually.\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: exit 0\n\ntest:\n  stage: test\n  when: manual\n  allow_failure: false  # Set to false to return to the previous behavior.\n  script: exit 0\n\ndeploy:\n  stage: deploy\n  script: exit 0\n  needs:\n    - test\n```\n\nShare any thoughts, comments, or questions by opening an issue in GitLab and mentioning me (`@dhershkovitch`).\n",[832,771,894],{"slug":3995,"featured":6,"template":678},"dag-manual-fix","content:en-us:blog:dag-manual-fix.yml","Dag Manual Fix","en-us/blog/dag-manual-fix.yml","en-us/blog/dag-manual-fix",{"_path":4001,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4002,"content":4007,"config":4011,"_id":4013,"_type":16,"title":4014,"_source":17,"_file":4015,"_stem":4016,"_extension":20},"/en-us/blog/vscode-workflows-for-working-with-gitlab",{"title":4003,"description":4004,"ogTitle":4003,"ogDescription":4004,"noIndex":6,"ogImage":2284,"ogUrl":4005,"ogSiteName":692,"ogType":693,"canonicalUrls":4005,"schema":4006},"Visual Studio Code editor: Eight tips for using GitLab VS Code","Learn how to use the Visual Studio Code editor more efficiently and meet some of the GitLab contributors who made these new features happen.","https://about.gitlab.com/blog/vscode-workflows-for-working-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Visual Studio Code editor: Eight tips for using GitLab VS Code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2021-05-20\",\n      
}",{"title":4003,"description":4004,"authors":4008,"heroImage":2284,"date":3991,"body":4009,"category":14,"tags":4010},[3291],"\n\nAs a software engineer, I spend a significant portion of my day in the Visual Studio code editor. Since I started maintaining the officially supported [GitLab VS Code extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow), I've developed a few tricks that make me a productive GitLab user. Below, I share eight tips that make my work more efficient and productive, while also introducing you to some of the GitLab contributors who made this tooling happen.\n\n## What is Visual Studio Code?\n[Visual Studio Code](https://en.wikipedia.org/wiki/Visual_Studio_Code), developed by Microsoft, lets a user debug source code in various languages from the editor. It is also used for syntax highlighting, intelligent code completion, code refactoring, embedded Git and autocomplete. VS Code, as it is commonly known, can be launched or attached to running apps.\n\nIt is designed for Windows, Linux, and MacOS. VS Code can be used with several programming languages such as Java, JavaScript, Node.js, Python, C++ and Fortran. Support for additional languages is provided by freely available extensions on the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\n\nBelow are eight tips for using GitLab VS Code.\n\n### How to clone any GitLab project\n\nGitLab contributor [Felix Haase](https://gitlab.com/haasef) recently [implemented a feature that lets you clone any GitLab project where you are a member](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/172). To clone the project, use the official `Git: Clone` command and select your GitLab instance. Use the `Git: Clone` command by selecting the command from the [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette).\n\nThis feature can save you time if you already know the name of the project you want to clone.\n\n![VS Code clone dialogue](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/clone.png){: .shadow.medium.center}\nVS Code lets you filter which project to clone.\n{: .note .text-center}\n\n### How to view MRs and issues\n\nIt is easy to look through issues and MRs that you created, are assigned to, or are reviewing using GitLab. The lesser-known feature of the GitLab Workflow extension is [custom queries](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/main/docs/user/custom-queries.md). Custom search queries allow you to refine the search expressions for issues and MRs that appear in the VS Code side panel. You can apply all the advanced search terms you are used to from the GitLab web search: Labels, full-text search expression, milestones, authors, assignees, and more.\n\n![GitLab extension sidebar](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/issues-and-mrs.png){: .shadow.medium.center}\nSee your issues and MRs in the VS Code sidebar.\n{: .note .text-center}\n\nAnother option is [reviewing the MRs in VS Code](/blog/mr-reviews-with-vs-code/). 
The final functionality that is missing in MR review is [creating new comments on the MR diff](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/342), which we plan to ship by July 2021.\n\n### How to create an MR with two clicks\n\nIf you use the `git` command in a terminal, you might have noticed that pushing your branch to GitLab produces the following output:\n\n```txt\nremote: To create a merge request for my-new-branch, visit:\nremote: https://gitlab-instance.com/my-group/my-project/merge_requests/new?merge_request%5Bsource_branch%5D=my-new-branch\n```\n\nClicking the link opens your browser on a new MR page, where you can create an MR from the branch you just pushed.\n\nWhen I started pushing my branches through VS Code, I missed this feature, to the point that I searched through the VS Code Git extension logs to find the create-MR link (command: `Git: Show Git Output`).\n\nLuckily, GitLab contributor [Jonas Tobias Hopusch](https://gitlab.com/jotoho) implemented a [status bar button](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/155) that lets you create MRs just as easily.\n\nTo create an MR from your changes, push them to your remote repository (the cloud icon next to the branch name) and then click the `GitLab: Create MR.` button.\n\n![VS Code status bar](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/status-bar-create-mr.png){: .shadow.medium.center}\nVS Code status bar with buttons from GitLab extension.\n{: .note .text-center}\n\n### How to configure your GitLab CI\n\nThe GitLab extension helps you edit your `.gitlab-ci.yml` configuration file in two ways: Autocompleting environment variables and validating the configuration.\n\nThanks to [Kev's](https://gitlab.com/KevSlashNull) fantastic [contribution](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/140), you can [use CI](/solutions/continuous-integration/) variable autocompletion anywhere in your `.gitlab-ci.yml`. The hints even include variable descriptions and explain supported GitLab versions.\n\n![CI variables autocomplete dialogue](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/ci-autocomplete.png){: .shadow.medium.center}\nCI variables autocomplete dialogue.\n{: .note .text-center}\n\nWhen you finish writing your `.gitlab-ci.yml` CI configuration, you can use the `GitLab: Validate GitLab CI config` command to surface any problems before committing the CI config to your repository.\n\n### How to create and paste project snippets\n\nIs there a piece of text that you and your teammates often use? Maybe it is a license header for a file or a test scenario template. You can use GitLab snippets in combination with the Visual Studio Code editor to save yourself a few keystrokes.\n\nFor example, you can create a [test file snippet](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/snippets/2110322) with the `GitLab: Create snippet` command and then paste it into every new test file you create with the `GitLab: Insert snippet` command.\n\n![Paste Snippet dialogue](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/paste-snippet.png){: .shadow.medium.center}\nPaste Snippet dialogue.\n{: .note .text-center}\n\nI mostly use snippets when I want to share a big blob of text. 
I select the text and then create the snippet with the `GitLab: Create snippet` command.\n\n### How to copy web URL for a project file\n\nMost of the [communication at GitLab happens asynchronously](https://handbook.gitlab.com/handbook/values/#bias-towards-asynchronous-communication). So instead of being able to show your colleague an open file in your editor, you'll need to be able to create a textual pointer to the file.\n\nA straightforward way to do that is to use the `GitLab: Copy link to active file on GitLab` command, which will copy the web URL of the open file into your clipboard. It even includes the line number or a range of lines based on your cursor or selection in the Visual Studio Code editor.\n\nYou might also consider using the `GitLens: Copy Remote File URL` command, which even includes the commit SHA in the URL, making it a permalink. The permalink will always point to the same version of the file regardless of further commits to your branch. We'll look at the GitLens extension in tip number 7 a bit later on.\n\nFor the GitLab Enterprise/Community Edition you can use:\n\n```json\n\"gitweblinks.gitLabEnterprise\": [\n    {\n        \"http\": \"https://local-gitlab\",\n        \"ssh\": \"git@local-gitlab\"\n    }\n]\n```\n\n#### What to do if VS Code source control is not working\n\nA `SourceControl` is the entity responsible for populating the [Source Control model](https://code.visualstudio.com/api/extension-guides/scm-provider) with resource states, instances of `SourceControlResourceState`. Resource states are organized in groups, instances of `SourceControlResourceGroup`.\n\nLinking to issues in source code is a normal part of the VS Code workflow, especially when there's some logic that's difficult to understand or when there's a `//TODO` comment that needs action. [Users report experiencing issues](https://stackoverflow.com/questions/60232215/visual-studio-code-source-control-not-showing-changes) with file changes not appearing in Source Control unless added manually.\n\nOne user offered a 7-step solution that worked for them. Another said that all they had to do was disable and then re-enable the built-in Git extension, which fixed it. Yet another went to the “code” folder where they keep all their repos, right-clicked the folder containing the repo they wanted, and opened that folder with VS Code.\n\nAn often-used approach to working on issues is to pick one to work on, create a branch to work in, make some commits, then merge your changes back into the main or default branch with a pull request. You can do that from the new Issues view.\n\n#### GitLab Workflow extension for VS Code\n\nThe [GitLab Workflow extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) integrates GitLab with VS Code. 
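\n\nIf you use a self-managed instance, the extension first needs to know where your GitLab lives. As a minimal sketch (the `gitlab.instanceUrl` setting name is taken from the extension's README; confirm it against the version you install), you would add something like this to your `settings.json`:\n\n```json\n{\n  \"gitlab.instanceUrl\": \"https://gitlab.example.com\"\n}\n```\n\nThe personal access token itself is created and assigned through the command palette, as described below.\n\n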
You can decrease context switching and do more day-to-day tasks in VS Code, such as:\n\n- [View issues](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#browse-issues-review-mrs).\n- Run [common commands](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#commands) from the Visual Studio Code [command palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette).\n- Create and [review](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#merge-request-reviews) merge requests directly from Visual Studio Code.\n- [Validate](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#validate-gitlab-ci-configuration) your GitLab CI configuration.\n- [View the status](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#information-about-your-branch-pipelines-mr-closing-issue) of your current pipeline.\n- [Create](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#create-snippet) and paste snippets to, and from, your editor.\n- [Browse repositories](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#browse-a-repository-without-cloning) without cloning them.\n\nDownload the extension from the [Visual Studio Code Marketplace](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). Then you can configure:\n\n- [Features to display or hide](https://gitlab.com/gitlab-org/gitlab-vscode-extension#extension-settings).\n- [Self-signed certificate](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#self-signed-certificates) information.\n\nReport any issues, bugs, or feature requests in the [gitlab-vscode-extension issue queue](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues).\n\nThis extension supports GitLab version 13.0 and later. To find your GitLab version, visit [help](https://gitlab.com/help).\n\nYou can also see the pipeline status, open MR, and closing issue links in the status bar. The pipeline status is updated automatically, so you don’t need to open GitLab to see your pipeline status.\n\nOnce the extension is installed, you can use the command palette to run its commands, and create a GitLab personal access token (required) and assign it to the extension.\n\nYou can also set the token in an environment variable and learn how to change the VS Code settings. There are instructions for several other in-depth features as well.\n\n## How GitLens simplifies working with the VS Code editor\n\nUp until now, the tips were centered around the [GitLab Workflow extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow), but there is a fantastic extension that's improving VS Code Git integration regardless of where you host your repository: [GitLens](https://marketplace.visualstudio.com/items?itemName=eamodio.gitlens).\n\n### Walking file history\n\nGitLens makes it easy to browse the history of changes to the current file. Each versioned file will have three new editor icons, which provide quick access to all previous revisions of the file. 
The middle button seen in the image below provides a series of actions on the current version (e.g., opening the commit in the GitLab web UI).\n\n![GitLens history browsing buttons](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/gitlens-history.png){: .shadow.medium.center}\nGitLens history browsing buttons\n{: .note .text-center}\n\n### How to compare current HEAD against branch or tag\n\nOne of my habits was inspecting `git diff` between my feature branch and the main branch before creating an MR. More often than not, I had forgotten to write a test or remove some pesky `console.log()`.\n\nGitLens adds multiple sections to your [\"Source Control\" tab](https://code.visualstudio.com/docs/editor/versioncontrol#_scm-providers). For each branch, tag, and commit, click the \"Compare\" icon, which will show you the changes between your current HEAD and that reference. Seeing the local diff is great for previewing changes before pushing the new branch to the remote.\n\n![GitLens - compare with branch](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/gitlens-compare.png){: .shadow.medium.center}\nHow to compare with a branch using GitLens.\n{: .note .text-center}\n\n## Everyone can contribute\n\nNew features and fixes to the GitLab Visual Studio Code editor extension are added every month. If you find any issues or have a feature request, please go to our [GitLab VSCode issues tracker](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues) and, if your request isn't already present in the tracker, create one. Everyone can contribute to GitLab, and we welcome your ideas on how to improve our Visual Studio Code extension.\n\n## Read more on Visual Studio Code and GitLab:\n\n- [Four new tools for your Visual Studio Code and GitLab tool belt](/blog/vscode-workflow-new-features/)\n\n- [VS Code extension development with GitLab](/blog/vscode-extension-development-with-gitlab/)\n\n- [How to do GitLab merge request reviews in VS Code](/blog/mr-reviews-with-vs-code/)\n\n- [How we created a GitLab Workflow Extension for VS Code](/blog/use-gitlab-with-vscode/)\n\n",[232,726],{"slug":4012,"featured":6,"template":678},"vscode-workflows-for-working-with-gitlab","content:en-us:blog:vscode-workflows-for-working-with-gitlab.yml","Vscode Workflows For Working With Gitlab","en-us/blog/vscode-workflows-for-working-with-gitlab.yml","en-us/blog/vscode-workflows-for-working-with-gitlab",{"_path":4018,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4019,"content":4025,"config":4031,"_id":4033,"_type":16,"title":4034,"_source":17,"_file":4035,"_stem":4036,"_extension":20},"/en-us/blog/project-management-using-gitlab-platform",{"title":4020,"description":4021,"ogTitle":4020,"ogDescription":4021,"noIndex":6,"ogImage":4022,"ogUrl":4023,"ogSiteName":692,"ogType":693,"canonicalUrls":4023,"schema":4024},"Can DevOps and project management co-exist? Yes, on the daily at GitLab","Stay agile by using GitLab for DevOps project management","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669575/Blog/Hero%20Images/agilemultipleteams.jpg","https://about.gitlab.com/blog/project-management-using-gitlab-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Can DevOps and project management co-exist? 
Yes, on the daily at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2021-05-11\",\n      }",{"title":4020,"description":4021,"authors":4026,"heroImage":4022,"date":4028,"body":4029,"category":14,"tags":4030},[4027],"Vick Kelkar","2021-05-11","\n\nGitLab is best known as an all-in-one DevOps platform, but it is also an effective tool for project management. Non-technical teams at GitLab, such as [the Marketing team](/blog/gitlab-for-project-management-one/), use the GitLab DevOps platform for project management, and recently the Alliances team learned that DevOps and project management work well for our purposes.\n\n## About the IBM partnership\n\n[GitLab recently launched a partnership with IBM](/press/releases/2021-01-14-gitlab-IBM-to-support-acceleration-of-devops-automation.html) to help the organization automate their DevOps platform. Since I work on the Alliances team, I needed an efficient, compatible, and high-performance project management application to manage the many moving parts of the GitLab and IBM partnership, as well as other projects related to our partnerships.\n\nMy very first instinct was to test a few of the project management web applications on the market, but this would involve a tedious process of convincing my colleagues to join me on this journey to explore a sprawling new set of tools. Then I thought: why not explore our own GitLab DevOps platform as a project management tool? The beauty of GitLab is that it is a [DevOps platform](https://www.youtube.com/watch?v=wChaqniv3HI) delivered as a single easy-to-use application.\n\nSome of my early questions were:\n\n- Can the GitLab DevOps platform work as a project management tool for the strategic Alliances team?\n- Can GitLab manage and track business activities over a period of time?\n- Can team members collaborate and manage various projects using a single application?\n\nIn the end, the journey to adopting GitLab as a DevOps platform and project management tool was similar to the journey many of our customers experience. In this blog post, I will dive deeper into how the Alliances team uses GitLab for project management, explain how we used GitLab to onboard a new strategic partner, and show how we launched support of [GitLab Ultimate for IBM Cloud Paks](https://www.ibm.com/products/gitlab-ultimate). All the pre- and post-onboarding activities in particular required collaboration and contributions from various teams across the organization.\n\n## Applying DevOps features to project management\n\n### About epics and roadmaps\n\nWhy organize work into a hierarchy? I began the strategic partnership effort by organizing the work into multi-level epics. The [idea behind epics is to aggregate similar work](https://docs.gitlab.com/ee/user/group/epics/#epics) (or issues) into epics and manage the delivery of that work. In the example below, you'll see the top-level epic was called \"IBM cloud paks\", which contained three child epics.\n\n![An example of multi-level epics from the IBM cloud paks project](https://about.gitlab.com/images/blogimages/proj-mgmt-epic.png){: .shadow.medium.center}\nWork is divided into three time-bound levels for the IBM cloud paks project: Pre-launch, 0-90 days, and 90-180 days.\n{: .note.text-center}\n\nAnother way to represent the epics is through a [roadmap view](https://docs.gitlab.com/ee/user/group/roadmap/#roadmap). 
The main advantage of this feature is that it allows the collaborators on epics and issues to monitor project progress using a calendar timeline view.\n\n![An example of a project management timeline for the IBM cloud paks project using the epics roadmap view](https://about.gitlab.com/images/blogimages/proj-mgmt-timeline.png){: .shadow.medium.center}\nThe same IBM cloud paks project epic is depicted using the Roadmap view, which adopts a timeline view.\n{: .note.text-center}\n\n### How issues are used to capture work\n\nClick into any of the epics to find a set of issues that make up the epic. I use [issues as the basic unit of work](https://docs.gitlab.com/ee/user/project/issues/). Contained within the \"IBM cloud paks: Pre-launch\" epic are 33 issues.\n\n![The list view shows inside the \"IBM cloud paks: Pre-launch\" epic are 33 issues](https://about.gitlab.com/images/blogimages/proj-mgmt-issue.png){: .shadow.medium.center}\nInside the \"IBM cloud paks: Pre-launch\" epic are 33 issues\n{: .note.text-center}\n\nOne thing to note is that an issue can have a single assignee or owner, or it can have multiple assignees.\n\n### How to use issue boards\n\nAn [agile board](/blog/gitlab-for-agile-portfolio-planning-project-management/) can help a user visualize work and manage all the open threads in a given epic and/or project. The board can help you move issues efficiently through various phases of work. On the Alliances team, we are always iterating on how to better track the status of issues. [Here is more information about the current status flows for the Alliances team](/handbook/alliances/#status-alliance---status--status).\n\nThe screenshot below shows how an [issue board can be applied as a Kanban board by filtering for the \"IBM\" label](https://docs.gitlab.com/ee/user/project/issue_board.html#issue-boards). To see transitions between work stages, use [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels), which are mutually exclusive and represent transitions between various workflow statuses, such as \"status::1\" and \"status::2\".\n\n![Kanban board showing how labels can be used to organize issues into work stages](https://about.gitlab.com/images/blogimages/proj-mgmt-board.png){: .shadow.medium.center}\nHow we use boards for the IBM cloud paks project.\n{: .note.text-center}\n\n### Milestones help time-box events\n\nWhile an epic is a collection of related issues, [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/), and sub-epics, and is generally used to scope a long-running initiative or program (e.g., a marketing campaign or a new product category), epics can also contain smaller, more discrete, timeboxed events, such as monthly releases or calendar quarters. These [timeboxes are represented as Milestones](https://docs.gitlab.com/ee/user/project/milestones/), which roll up issues and merge requests in the same way as higher-level epics. Apply the \"Milestone view\" to track progress on the smaller deliverables within an epic.\n\n![Milestone view showing Alliances team projects](https://about.gitlab.com/images/blogimages/proj-mgmt-milestone.png){: .shadow.medium.center}\nHow milestones can be used to track work progress within a specific time frame.\n{: .note.text-center}\n\n### How Milestone burnup and burndown charts track progress\n\n[Burnup and burndown charts are used by project managers to measure progress](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html). 
Burndown charts analyze how much work is left in a project before it can be finished successfully. Burnup charts measure the work that has been done against the total work for the project. Both types of charts are available in the GitLab DevOps platform. I relied mostly on epics and milestones to track work progress for the IBM partnership.\n\n![burndown](https://about.gitlab.com/images/blogimages/proj-mgmt-burndown.png){: .shadow.medium.center}\nThe burndown and burnup charts for the IBM cloud paks partnership project.\n{: .note.text-center}\n\n### Inside analytics and insights project management tools\n\nMost project management tools are great at capturing project details, and can help answer questions such as \"where does the project stand on actual vs. planned activities?\" or can help track progress using milestones and due dates. [Project analytics and insights dashboards](https://docs.gitlab.com/ee/user/analytics/#project-level-analytics) are built into the GitLab DevOps platform. There are many built-in analytics dashboards, such as CI/CD, code review, merge requests, and issues. For the IBM partnership project, I used the [issues dashboard analytics](https://docs.gitlab.com/ee/user/group/issues_analytics/index.html) to see how many issues were opened compared to how many issues were closed. This tool helped me manage the team's capacity and identify any bottlenecks in the project.\n\n![The insights dashboard shows how many issues were opened and closed](https://about.gitlab.com/images/blogimages/proj-mgmt-insights.png){: .shadow.medium.center}\nThe insights dashboard shows how many issues were opened vs. how many were closed each month.\n{: .note.text-center}\n\n[Value Stream Analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/) is a unique feature of GitLab's analytics suite. Since GitLab is a complete DevOps platform with a single data store, GitLab can automatically generate reports to not only identify high-level metrics and blockers, but also drill down into those blockers and improve value flow with just a few clicks.\n\n![Showing recent project activity: 32 new issues and 19 commits](https://about.gitlab.com/images/blogimages/proj-mgmt-analysis.png){: .shadow.medium.center}\nAnalytics showing recent project activity.\n{: .note.text-center}\n\nValue Stream Analytics provides a high-level view into common stages of the SDLC out of the box, making it easier to monitor the overall workflow from discussion to code changes, through review and collaboration, and out to production – with no additional work required. And since the code changes and collaboration are happening within GitLab, just one click on an item will take you to the blocked issue or merge request, so you can comment, reassign, or contribute to move things along.\n\nSince all the necessary data is already in GitLab's system, customizing Value Stream Analytics can be completed in just a few clicks: Hiding and reordering stages and even creating your own with simple drop-down menus.\n\n![The customized value stream shows the average amount of time spent in the selected stage for each item](https://about.gitlab.com/images/blogimages/proj-mgmt-valuestream.png){: .shadow.medium.center}\nThe custom value stream above shows the number of days to completion.\n{: .note.text-center}\n\n## DevOps platform and project management in one\n\nThere are many project management tools in the marketplace, and many solutions for managing the SDLC of a project. 
The GitLab DevOps platform and project management tool satisfied my need to track partnership-related activities while also managing the technical demos and workshops developed for the IBM partnership. I look forward to continuing to explore the constantly evolving GitLab platform to grow and manage our strategic partnerships on the Alliances team.\n\nCover image by [Martin Sanchez](https://unsplash.com/@martinsanchez?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/MD6E2Sv__iA)\n{: .note.text-center}\n",[1646,1347,749,2409,727],{"slug":4032,"featured":6,"template":678},"project-management-using-gitlab-platform","content:en-us:blog:project-management-using-gitlab-platform.yml","Project Management Using Gitlab Platform","en-us/blog/project-management-using-gitlab-platform.yml","en-us/blog/project-management-using-gitlab-platform",{"_path":4038,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4039,"content":4045,"config":4051,"_id":4053,"_type":16,"title":4054,"_source":17,"_file":4055,"_stem":4056,"_extension":20},"/en-us/blog/using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts",{"title":4040,"description":4041,"ogTitle":4040,"ogDescription":4041,"noIndex":6,"ogImage":4042,"ogUrl":4043,"ogSiteName":692,"ogType":693,"canonicalUrls":4043,"schema":4044},"Using web components to encapsulate CSS and resolve design system conflicts","How we used web component technologies like the Shadow DOM to make it easy to incrementally adopt our new design system, Slippers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679537/Blog/Hero%20Images/slippers-sys.jpg","https://about.gitlab.com/blog/using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using web components to encapsulate CSS and resolve design system conflicts\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tyler Williams\"}],\n        \"datePublished\": \"2021-05-03\",\n      }",{"title":4040,"description":4041,"authors":4046,"heroImage":4042,"date":4048,"body":4049,"category":14,"tags":4050},[4047],"Tyler Williams","2021-05-03","\n\n## The goal: A new design for the GitLab blog\n\nIn March 2021, the [Digital Experience team](/handbook/marketing/digital-experience/) deployed a new and improved design for the GitLab blog. This design change affected more than 1,300 blog posts. It is the largest exercise to date for [our design system, Slippers](https://gitlab.com/gitlab-com/marketing/digital-experience/slippers-ui). It presented challenges due to the age and size of the GitLab blog. We wanted to live up to GitLab's [iteration value](https://handbook.gitlab.com/handbook/values/#iteration): \"Do the smallest thing possible and get it out as quickly as possible\".\n\n## The major challenge: Incrementally adopting a new design system with conflicting CSS\n\n[Slippers uses Tailwind CSS](https://gitlab-com.gitlab.io/marketing/inbound-marketing/slippers-ui/?path=/story/tailwind-css--page), which comes with its own set of base styles, called [Preflight](https://tailwindcss.com/docs/preflight). Preflight acts as a set of normalizing styles (it's built on top of [modern-normalize](https://github.com/sindresorhus/modern-normalize)), which is useful for new projects or projects making a full transition. 
In our case, Preflight is a hurdle because it has to work alongside our existing CSS.\n\nWe explored some out-of-the-box solutions, such as enabling the Tailwind [!important configuration](https://tailwindcss.com/docs/configuration#important) or using a [very specific selector strategy](https://tailwindcss.com/docs/configuration#selector-strategy).\n\nWe got very close to our desired outcome in both cases, but a problem remained:\n\nCritical legacy components required the old CSS. Those old styles were getting past `!important` and selector strategies because they applied to attributes we had not specified in our Tailwind utilities. Resolving those conflicts would take too much time and manual effort. We wanted a more [efficient](https://handbook.gitlab.com/handbook/values/#efficiency) solution, so we focused on two things: Identifying an ideal state for our CSS and finding better CSS encapsulation. The goal was to prevent existing styles from affecting new components, and new styles from affecting old components.\n\n## The solution: CSS encapsulation with web components\n\n[Web component technologies](https://developer.mozilla.org/en-US/docs/Web/Web_Components) offered a compelling solution to the requirement that the old CSS keep working. We used the [shadow DOM](https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_shadow_DOM) to encapsulate CSS. [Templates and slots](https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_templates_and_slots) allowed us to use existing HTML, ERB, and HAML templates. [Custom elements](https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_custom_elements) brought it all together.\n\nIn the [top-level blog template](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/sites/uncategorized/source/includes/cms/blog_post/slippers-blog-post.erb), we placed a [template tag for the blog post markup](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5667df8046398e47cb04d02fcc386420afc7ab57_35_28). The `template` tag is valid HTML – meaning our templating engine can process everything inside it. We can use `partial` tags and `yield` as expected, and they become part of the template. 
The output below shows what that looks like (some classes omitted for brevity):\n\n```erb\n\u003Ctemplate id=\"slp-blog\">\n  \u003Cmain class=\"slpBlog\">\n    \u003Cheader class=\"slpBlog__header\">\n      \u003C%= partial \"includes/cms/blog_post/slp-blog-avatar\", locals: { author: author } %>\n      \u003C%= partial \"includes/cms/blog_post/slp-tags\" %>\n      \u003Chr/>\n    \u003C/header>\n    \u003Carticle class=\"slpBlog__article\">\n      \u003C% if current_page.data.image_title %>\n        \u003Cimg alt=\"\" src=\"\u003C%= current_page.data.image_title %>\" width=\"100%\"/>\n      \u003C% end %>\n      \u003C%= yield %>\n    \u003C/article>\n    \u003Caside class=\"slpBlog__aside\">\n      \u003C%= partial \"includes/cms/blog_post/slp-social-follow\" %>\n      \u003Cslot name=\"non-slippers-aside-items\">\u003C/slot>\n    \u003C/aside>\n    \u003Cfooter class=\"slpBlog__footer\">\n      \u003Chr/>\n      \u003C%= partial \"includes/cms/blog_post/slp-related-content\" %>\n      \u003Cslot name=\"non-slippers-footer-items\">\u003C/slot>\n      \u003Chr/>\n    \u003C/footer>\n  \u003C/main>\n\u003C/template>\n\u003Cscript src=\"/javascripts/slippers-blog.js\" type=\"text/javascript\">\u003C/script>\n```\n\nThe top-level template loads [`source/javascripts/slippers-blog.js`](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/source/javascripts/slippers-blog.js) inside the `body` of the document, which blocks rendering until the script finishes loading. `source/javascripts/slippers-blog.js` imports Slippers CSS as a variable [using webpack loader syntax](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5b5ceecb366e6e69e99e2bae290c68bae177fc17_0_2). With the CSS stored as a variable, we can inject it into the [custom element definition](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5b5ceecb366e6e69e99e2bae290c68bae177fc17_0_6).\n\nNext, we register `slp-blog` as a custom element. When the DOM parses the markup, it will either render the blog post template or, in the rare circumstance our JavaScript didn't load, it will fail. If it fails, we fall back to the [`yield` output in the light DOM](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5667df8046398e47cb04d02fcc386420afc7ab57_35_55) to make sure critical content is never lost. 
In these cases, our static site generator already rendered the template, so the images and text of the blog post remain accessible to the visitor.\n\nHere's what that JavaScript looks like:\n\n```js\nimport Vue from 'vue/dist/vue.min.js'\nimport Slippers from 'slippers-ui/dist/slippersComponents.common.js'\n\n// eslint-disable-next-line import/no-webpack-loader-syntax\nconst css = require(\"!raw-loader!sass-loader!../stylesheets/slippers.css.scss\").default;\n\n// Some event handlers and other requirements omitted for brevity\n\nexport function initializeSlippersWebComponent() {\n    if (window.customElements) {\n        customElements.define('slp-blog',\n            class extends HTMLElement {\n                constructor() {\n                    super();\n                    const template = document.getElementById('slp-blog').content;\n                    const shadowRoot = this.attachShadow({ mode: 'open' });\n                    shadowRoot.innerHTML = `\u003Cstyle>${css}\u003C/style>`;\n                    shadowRoot.appendChild(template.cloneNode(true));\n                }\n            });\n    }\n}\n```\n\nIf the script successfully loads, the light DOM content generated by our fallback `yield` statement is thrown away when the custom component is rendered. This is why we use an inline script tag beforehand - to avoid a [flash of unstyled content](https://en.wikipedia.org/wiki/Flash_of_unstyled_content#:~:text=A%20flash%20of%20unstyled%20content,before%20all%20information%20is%20retrieved.).\n\nFinally, we can use [slots to render non-Slippers items](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/77190/diffs#5667df8046398e47cb04d02fcc386420afc7ab57_40_59). Slotted elements get CSS from the light DOM, so our preexisting [partials and other included templating](https://gitlab.com/gitlab-com/www-gitlab-com/-/tree/master/sites/uncategorized/source/includes/blog) will still work as expected.\n\nOur custom element and its slots look something like this:\n\n```erb\n\u003Cslp-blog>\n  \u003C%= yield %>\n  \u003Cdiv slot=\"non-slippers-aside-items\">\n    \u003C%= partial \"includes/newsletter-signup.html\" %>\n  \u003C/div>\n  \u003Cdiv slot=\"non-slippers-footer-items\">\n    \u003C% unless current_page.data.install_cta == false %>\n      \u003C%= partial \"includes/blog/try\" %>\n    \u003C% end %>\n    \u003C% if ci_environment? %>\n      \u003C%= partial \"includes/blog/comments\" %>\n    \u003C% end %>\n  \u003C/div>\n\u003C/slp-blog>\n```\n\n## Results: Rapid iteration with minimal tradeoffs\n\nOur solution has some tradeoffs:\n\n1. We added complexity to the build process for our blog posts.\n1. Web components have wide browser support, but that's only a recent development. The best practices around these tools are still being debated.\n1. Technically, we added client-side rendering to our statically generated site, meaning we're giving up some of the static site benefits to achieve our CSS encapsulation.\n\nThose tradeoffs are worth it in the end. We achieved near-perfect CSS encapsulation which allowed us to iterate on Slippers and ship the blog template efficiently. We have reasonable fallbacks in place to preserve critical content for people who can't or won't load JavaScript to read our posts. 
Web components are the future, and we're excited to use them responsibly.\n",[959,915],{"slug":4052,"featured":6,"template":678},"using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts","content:en-us:blog:using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts.yml","Using Web Components To Encapsulate Css And Resolve Design System Conflicts","en-us/blog/using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts.yml","en-us/blog/using-web-components-to-encapsulate-css-and-resolve-design-system-conflicts",{"_path":4058,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4059,"content":4064,"config":4070,"_id":4072,"_type":16,"title":4073,"_source":17,"_file":4074,"_stem":4075,"_extension":20},"/en-us/blog/why-its-crucial-to-break-things-down-into-smallest-iterations",{"title":4060,"description":4061,"ogTitle":4060,"ogDescription":4061,"noIndex":6,"ogImage":2284,"ogUrl":4062,"ogSiteName":692,"ogType":693,"canonicalUrls":4062,"schema":4063},"Why iterative software development is critical","How we learned from our mistakes and adopted an iterative software development mentality to reduce the likelihood of shipping something that doesn't add value.","https://about.gitlab.com/blog/why-its-crucial-to-break-things-down-into-smallest-iterations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why iterative software development is critical\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matej Latin\"}],\n        \"datePublished\": \"2021-04-30\",\n      }",{"title":4060,"description":4061,"authors":4065,"heroImage":2284,"date":4067,"body":4068,"category":14,"tags":4069},[4066],"Matej Latin","2021-04-30","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-05-05.\n{: .note .alert-info .text-center}\n\nIn a previous blog post called [Small experiments, significant results](/blog/small-experiments-significant-results-and-learnings/) I shared our recent success with conducting small experiments, but, in reality, we didn't start with the most iterative software development approach. It was the Growth team's early failures to iterate that helped us embrace launching smaller experiments with measurable results.\n\nWhen the [Growth team](/handbook/engineering/development/growth/) formed at GitLab in late 2019, we had little experience with designing, implementing, and shipping experiments intended to accelerate the growth of our user base. We hired experienced people but it was still hard to predict [how long it would take to implement and ship an experiment](/handbook/engineering/development/growth/#running-experiments). The \"Suggest a pipeline\" experiment was the first one I worked on with the Growth:Expansion team. The idea was simple: Guide users through our UI to help them set up a [CI/CD pipeline](/blog/guide-to-ci-cd-pipelines/).\n\n![The guided tour entry](https://about.gitlab.com/images/blogimages/smallest-iterations/suggest.png)\nThe first iteration of the \"suggest a pipeline\" guided tour.\n{: .note.text}\n\n[See the original prototype of the \"suggest a pipeline\" guided tour.](https://www.sketch.com/s/1794d37d-c722-4d32-862e-9c6c5d831149/a/zn1Z9o/play)\n\nThe guided tour would start on the merge request page and ask the user if they want to learn how to set up a CI/CD pipeline. Those who opted in would be led through the three steps required to complete the setup. 
The team saw this as a simple three-step guide, so we committed ourselves to shipping it without first considering if it was the smallest experiment we could complete. We wanted to create a guided tour because it hadn't been done yet at GitLab, but in the end, this wasn't the most iterative software development approach. Today, our thinking is: \"What's the smallest thing we can test and learn from?\"\n\nOne of GitLab's company values is [iteration](https://handbook.gitlab.com/handbook/values/#iteration), which means that we strive to do *the smallest thing possible and get it out as quickly as possible*. The concept of [MVC (minimal viable change)](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) guides this philosophy:\n\n> We encourage MVCs to be as small as possible. Always look to make the quickest change possible to improve the user's outcome.\n\nLooking back, I realized we failed to embrace the MVC with the \"suggest a pipeline\" experiment, but I'm grateful for that mistake because it provided us with one of the most valuable lessons: Always strive to complete the smallest viable change first. The idea of iterative software development is valuable even, or maybe especially, with experiments.\n\nBelow are five reasons why it's important to break development down. Small iterations:\n\n- Get value to the user faster.\n- Decrease the risk of shipping something that doesn't add value.\n- Make it easier to isolate and understand the impact of changes.\n- Ship faster, so the team starts learning sooner.\n- Allow teams to begin thinking about further iterations sooner, or to abandon the experiment earlier (saving both time and resources).\n\n![Small vs large iterations](https://about.gitlab.com/images/blogimages/smallest-iterations/chart.jpg)\nThe power of iterative software development is made clear by the two workflows.\n{: .note.text}\n\n\nIn the \"non-experimental work\" figure above, team one shipped a smaller iteration quickly and updated it twice, while team two only shipped one large iteration in the same time. Team one learned from their first small iteration and adapted their solution twice in the time team two shipped a larger iteration. It took team two longer to ship the large iteration, and they sacrificed earlier findings they could have used to optimize their solution.\n\nIn the \"experimental work\" figure, team one shipped a smaller first iteration and reviewed early results, which helped them make an evidence-based decision: iterate further on their first idea, or abandon it and move on to a new idea. Through this iterative software development process, they could either ship three iterations of their first idea or abandon it and start working on the first iteration of idea two. Team one could accomplish all this development in the same amount of time it took team two to ship a larger first iteration of idea one. Team one is much more likely to reach successful results and learnings faster than team two.\n\n## How the \"suggest a pipeline\" experiment _should_ have been done\n\nIt's easy to reflect on our project today and see what we did wrong, but such reflection allows us to avoid repeating mistakes. The GitLab guided tour looked like a simple experiment to build and ship, but in the end it wasn't and took months to complete. Overall, the experiment was successful, but after it was implemented we took a second look and saw the project could be improved. 
We decided to implement some improvements by iterating on the copy in our first nudge to users to encourage more users to opt in. Had we shipped a smaller experiment sooner, we could have iterated earlier and delivered an optimal version of the first nudge, allowing more users to benefit from the guided tour.\n\n![Had we shipped a smaller iteration, we would have improved the copy of our opt-in nudge to users sooner.](https://about.gitlab.com/images/blogimages/smallest-iterations/copy-changes.jpg)\nThe second iteration of our opt-in copy is much stronger. Shipping a smaller iteration would have encouraged more users to opt in to our experimental \"guided tour\" feature.\n{: .note.text}\n\nBecause it took us months to complete the implementation of the experiment, it also took us months to iterate on it.\n\nIf I had to do a similar experiment now, I'd start much smaller, with something that could be built and shipped in less than a month, ideally even faster. For example, we could have shipped an iteration with that first nudge linking to an existing source that explains how to set up a pipeline. That would have enabled us to validate the placement of the nudge, its content, and its effectiveness. It would have significantly reduced the risk of the experiment.\n\nOr maybe we could have [shortened the guided tour to be just two steps](https://gitlab.com/gitlab-org/growth/product/-/issues/1662/), which is exactly what [Kevin Comoli](/company/team/#kcomoli), product designer on Growth: Conversion, did. But because our idea already seemed like a small iteration, we never felt the urgency to reduce it further. So here's another reason why it's important to really think about the smallest possible iteration first: you can never be sure that what you're aiming to do will actually be as quick and simple as expected. So even when you think that your idea is the smallest possible iteration, *think again*.\n\n## How we're applying lessons on iteration to future experiments\n\nWhen I started working on the [\"invite members\" experiment](/blog/small-experiments-significant-results-and-learnings/), my vision for the experience was more complex than the \"suggest a pipeline\" guided tour. The idea behind the \"invite members\" experiment was that any user could invite their team members to a project and an admin user would have to approve the invitation. But because of our learnings from the pipeline tour, we decided to simplify the first experiment. Instead of designing and building a whole experience, we decided to use a [painted door test](https://crstanier.medium.com/a-product-managers-guide-to-painted-door-tests-a1a5de33b473), which essentially means focusing on tracking the main call-to-action to gauge user interest. For the \"invite members\" experiment, the painted door test involved displaying an invite link that, once clicked, displayed a message to users that the feature wasn't ready and suggested a temporary solution. 
This allowed us to validate the riskiest part of the experiment: Do non-admin users even _want_ to invite their colleagues?\n\n![Modal showing \"invite members\" feature isn't ready yet](https://about.gitlab.com/images/blogimages/smallest-iterations/modal-not-ready.png)\nThe \"invite members\" painted door experiment involved displaying a modal showing that the feature wasn't ready yet, but it still helped us gauge user interest before investing resources in developing the feature.\n{: .note.text}\n\n## Why iterative software development matters\n\nWe were lucky with the \"suggest a pipeline\" experiment. It was the first experiment we worked on, and it was \"low hanging fruit\", meaning it was a solution that required limited investment but still delivered big returns, which made the chance of failure lower. As we move away from obvious improvements and start exploring riskier experiments, we won't be able to rely on luck. We need to be diligent about iteration and break things down into MVCs and smaller experiments to reduce the risk of investing development time on projects that don't add value to the user experience, or fail to have a positive impact on GitLab's growth.\n\nPhoto by [Markus Spiske](https://unsplash.com/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/pieces?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[2855,915,1144],{"slug":4071,"featured":6,"template":678},"why-its-crucial-to-break-things-down-into-smallest-iterations","content:en-us:blog:why-its-crucial-to-break-things-down-into-smallest-iterations.yml","Why Its Crucial To Break Things Down Into Smallest Iterations","en-us/blog/why-its-crucial-to-break-things-down-into-smallest-iterations.yml","en-us/blog/why-its-crucial-to-break-things-down-into-smallest-iterations",{"_path":4077,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4078,"content":4083,"config":4089,"_id":4091,"_type":16,"title":4092,"_source":17,"_file":4093,"_stem":4094,"_extension":20},"/en-us/blog/puma-nakayoshi-fork-and-compaction",{"title":4079,"description":4080,"ogTitle":4079,"ogDescription":4080,"noIndex":6,"ogImage":1579,"ogUrl":4081,"ogSiteName":692,"ogType":693,"canonicalUrls":4081,"schema":4082},"Ruby 2.7: Understand and debug problems with heap compaction","An overview of Ruby 2.7 heap compaction and the risks it adds to production Rails applications.","https://about.gitlab.com/blog/puma-nakayoshi-fork-and-compaction","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ruby 2.7: Understand and debug problems with heap compaction\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matthias Käppler\"}],\n        \"datePublished\": \"2021-04-28\",\n      }",{"title":4079,"description":4080,"authors":4084,"heroImage":1579,"date":4086,"body":4087,"category":14,"tags":4088},[4085],"Matthias Käppler","2021-04-28","\n\nThe GitLab Rails application runs on [Puma](https://puma.io/), a multi-threaded Rack application server written in Ruby.\nWe recently updated Puma to major version 5, which introduced [a number of important\nchanges](https://github.com/puma/puma/blob/master/History.md#500--2020-09-17),\nincluding support for _compaction_, a technique to reduce memory fragmentation in the\nRuby heap.\n\nIn this post, we will describe what Puma's \"nakayoshi fork\" does, what compaction is,\nand some of the challenges we faced when 
first deploying it.\n\n## Nakayoshi: A friendlier `fork`\n\nPuma 5 added a new configuration switch: `nakayoshi_fork`. This switch affects Puma's behavior when\nforking new workers from the primary process. It is largely based on a [Ruby gem of the same name](https://github.com/ko1/nakayoshi_fork)\nbut adds new functionality. More specifically, enabling `nakayoshi_fork` in Puma will result in two additional\nsteps prior to forking into new workers:\n\n1. **Tenuring objects.** By running several minor garbage collection cycles ahead of a `fork`, Ruby can promote survivors\n   from the young to the old generation (referred to as \"tenuring\"). These objects are often classes, modules, or long-lived\n   constants that are unlikely to change.\n   This process makes forking copy-on-write friendly because tagging an object as \"old\" implies a write\n   to the underlying heap page. Doing this prior to forking means the OS won't have\n   to copy this page from the parent to the worker process later. We won't be discussing copy-on-write in detail, but\n   [this blog post offers a good introduction to the topic and how it relates to Ruby and pre-fork servers](https://brandur.org/ruby-memory).\n\n1. **Heap compaction.** Ruby 2.7 added a new method `GC.compact`, which\n   will reorganize the Ruby heap to pack objects closer together when invoked. `GC.compact` reduces Ruby heap fragmentation and\n   potentially frees up Ruby heap pages so that the physical memory consumed can be reclaimed by the OS.\n   This step only happens when `GC.compact` is available in the version of Ruby that is in use (for MRI, 2.7 or newer).\n\nIn the remainder of this post, we will look at:\n\n* How `GC.compact` works and its potential benefits.\n* Why using C-extensions can be problematic when using compaction.\n* How we resolved a production incident that crashed GitLab.\n* What to look out for before enabling compaction in your app, via `nakayoshi_fork` or otherwise.\n\n## How compacting garbage collection works\n\nThe primary goal of a compacting garbage collector (GC) is to use allocated memory more\neffectively, which increases the likelihood of the application using less memory over time.\nCompaction is especially important when processes can share memory, as is the case with Ruby pre-fork\nservers such as Puma or Unicorn. But how does Ruby accomplish this?\n\nRuby manages its own object heap by allocating chunks of memory from the operating system called pages\n(a confusing term since Ruby heap pages are distinct from the smaller memory pages managed by the OS itself).\nWhen an application asks to create a new object, Ruby will try to find a free object slot in one of these\npages and fill it. As objects are allocated and deallocated over the lifetime of the application,\nthis can lead to fragmentation, with pages being neither entirely full nor entirely empty. This is the\nprimary cause of Ruby's infamous runaway memory problem: Since the available space isn't optimally used,\npages will rarely become entirely empty \"tomb pages\", and only empty pages can be deallocated and their\nmemory returned to the OS.\n\nRuby 2.7 added a new method, `GC.compact`, which aims to address this problem by walking the entire\nRuby heap space and moving objects around to obtain tightly packed pages. This process will ideally make\nsome pages unused, and unused memory can be reclaimed by the OS.
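\n\nTo see the effect in isolation, you can experiment on the CLI. The following is a minimal sketch, not code from GitLab, assuming MRI Ruby 2.7 or newer; the exact page counts will vary by platform and Ruby version:\n\n```shell\n# Fragment the heap with many short-lived objects, then compact it.\n# GC.stat(:heap_eden_pages) counts the heap pages that still hold live objects.\n$ ruby -e '\n  junk = Array.new(500_000) { Object.new }\n  junk = nil\n  GC.start    # collect the garbage, leaving sparsely filled pages behind\n  puts \"eden pages before: #{GC.stat(:heap_eden_pages)}\"\n  GC.compact  # repack the surviving objects into fewer pages\n  puts \"eden pages after:  #{GC.stat(:heap_eden_pages)}\"\n'\n```\n\n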
[Watch this video from RubyConf 2019](https://www.youtube.com/watch?v=H8iWLoarTZc) where Aaron Patterson, the author of this feature, gave a good introduction to compacting GC.\n\nCompaction is a fairly expensive task since Ruby needs to stop the world for a complete heap reorganization, so\nit's best to perform this task before forking a new worker process, which is why Puma 5 included this step when performing `nakayoshi_fork`. Moreover, running compaction before forking\ninto worker processes increases the chance of workers being able to share memory.\n\nWe were eager to enable this feature on GitLab to see if it would reduce memory consumption, but things didn't entirely go as planned.\n\n## Inside the incident\n\nAfter extensive testing via our automated performance test suite and in preproduction\nenvironments, we felt ready to explore compaction on production nodes. We kept a\n[detailed, public record of what happened\nduring this production incident](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/3370), but the key details are summarized below:\n\n* The deployment passed the canary stage, meaning workers who had their heaps compacted were serving traffic\n  successfully at this point.\n* Sometime during the full fleet rollout, problems emerged: Error rates started spiking but not\n  across the entire fleet. This phenomenon is odd because errors tend to spread across all workers due to load balancing.\n* The error messages surfacing in Sentry were mysterious at best:\n  `ActionView::Template::Error\nuninitialized constant #\u003CClass:#GrapePathHelpers::DecoratedRoute:0x00007f95f10ea5b8>::UNDERSCORE`. Remember this error message for later.\n* We discovered the affected workers were segfaulting in [`hamlit`](https://github.com/k0kubun/hamlit),\n  a high-performance HAML compiler. Hamlit uses a C-extension to achieve better performance. The segfaulting and the fact\n  that we were rolling out an optimization that touches GC-internal structures was a tell-tale sign that\n  compaction was likely to be the cause.\n* We rolled back the change to quickly recover from the outage.\n\n## How we diagnosed the problem\n\nWe were disappointed by this setback and wanted to understand why the outage occurred. Fortunately,\nRuby provides detailed stack traces when crashing in C-extensions. The most effective way\nto quickly analyze these is to look for transitions where a C-extension calls into the Ruby VM\nor vice versa. These lines therefore caught our attention:\n\n```shell\n...\n/opt/gitlab/embedded/lib/libruby.so.2.7(sigsegv+0x52) [0x7f9601adb932] signal.c:946\n/lib/x86_64-linux-gnu/libc.so.6(0x7f960154c4c0) [0x7f960154c4c0]\n/opt/gitlab/embedded/lib/libruby.so.2.7(rb_id_table_lookup+0x1) [0x7f9601b15e11] id_table.c:227\n/opt/gitlab/embedded/lib/libruby.so.2.7(rb_const_lookup+0x1e) [0x7f9601b4861e] variable.c:3357\n/opt/gitlab/embedded/lib/libruby.so.2.7(rb_const_get+0x39) [0x7f9601b4a049] variable.c:2339\n# ^--- Ruby VM functions\n/opt/gitlab/embedded/lib/ruby/gems/2.7.0/gems/hamlit-2.11.0/lib/hamlit/hamlit.so(str_underscore+0x16) [0x7f95ee3518f8] hamlit.c:17\n/opt/gitlab/embedded/lib/ruby/gems/2.7.0/gems/hamlit-2.11.0/lib/hamlit/hamlit.so(rb_hamlit_build_id) hamlit.c:100\n# ^-- hamlit C-extension\n...\n```\n\nThe topmost stack frame reveals the preceding calls led to a segmentation fault (`SIGSEGV`).\nWe highlighted the lines where Hamlit calls back into Ruby: In a function called `str_underscore`, which\nwas called by `rb_hamlit_build_id`. 
The `rb_*` prefix tells us that this is a C-function we can call from Ruby,\nand indeed it is used by [`Hamlit::AttributeBuilder`](https://github.com/k0kubun/hamlit/blob/master/lib/hamlit/attribute_builder.rb) to construct DOM `id`s.\n\nBut we still don't know why it is crashing. Next, we need to inspect what happens in `str_underscore`.\nWe can see that this function performs a constant lookup on `mAttributeBuilder` – searching\nfor a constant called `UNDERSCORE`. Following the breadcrumbs, it turns out to simply be the string `\"_\"`.\nIt is this lookup that failed.\n\nWait -- `UNDERSCORE`? That sounds familiar. Recall the top-level error messages:\n\n```\nActionView::Template::Error\nuninitialized constant #\u003CClass:#GrapePathHelpers::DecoratedRoute:0x00007f95f10ea5b8>::UNDERSCORE\n```\n\nBut `GrapePathHelpers` is clearly not a Hamlit class. Hamlit is trying to look up its own `UNDERSCORE`\nconstant on a class in the [`grape`](https://github.com/ruby-grape/grape) gem, an entirely different library\nthat is not involved in HTML rendering at all. There is no such constant defined on Grape's\n`DecoratedRoute` class either.\n\nNow the penny dropped – remember how compaction moves around objects in Ruby's heap space? Classes in\nRuby are objects too, so `GC.compact` must have moved a Grape class into an object slot that was previously\noccupied by a Hamlit class object, but Hamlit's C-extension never saw it coming!\n\n## How we solved the problem\n\nTo be clear, what happened above should _not_ happen with a well-behaved C-extension. Compaction\nwas developed carefully with support for C-extensions that predate Ruby 2.7, so all\nexisting Ruby gems would continue to operate normally.\n\nSo what went wrong? When a C-extension allocates Ruby objects, it must _mark_ them for as long as\nthey are alive. A marked object will not be garbage collected, and because the Ruby GC cannot reason about objects\noutside of its own purview (which covers objects created from Ruby code), it needs to rely on C-extensions\nto correctly mark and unmark objects themselves.\n\nNow comes the twist: Marked objects can be moved during compaction, and existing C-extensions\ncan't cope with an object they hold pointers to suddenly moving into a different slot.\nTherefore, Ruby 2.7 does something clever: It \"pins\" objects allocated with the mark function that existed prior\nto Ruby 2.7, meaning the pinned objects are not allowed to move during compaction. 
For new code, Ruby 2.7 introduces\na special mark-but-don't-pin function that will also allow an object to move, giving gem authors the\nopportunity to make their libraries compaction-aware.\n\nHamlit does not implement compaction support, so this could only mean one thing:\nHamlit wasn't even properly marking those objects; otherwise, Ruby 2.7\nwould have automatically pinned them so they wouldn't move during compaction.\nWe [submitted an attempted fix](https://github.com/k0kubun/hamlit/pull/171), but without\na reliable way to reproduce the issue for everyone, the Hamlit author decided to sidestep the\nproblem by [resolving those constants statically instead](https://github.com/k0kubun/hamlit/pull/172)\nand marking each via `rb_gc_register_mark_object`.\nThis change landed in [Hamlit 2.14.2](https://github.com/k0kubun/hamlit/blob/master/CHANGELOG.md#2142---2021-01-21),\nwhich we confirmed resolves the issue.\n\n## The next steps\n\nIt is exciting to see that the Ruby community is making progress on making Ruby a more memory-efficient\nlanguage, but we learned that we need to step carefully when introducing such wide-reaching changes to a large\napplication like GitLab. It is difficult to investigate and fix problems that crash the Ruby VM, which is more likely for\nany library that uses C-extensions.\n\nTwo particular action items we took away from this were:\n\n1. **More reliable detection of compaction-related issues in CI.** We're not going to sugar-coat this:\n   We detected the problem late. Our comprehensive test suite was passing, our QA and performance tests\n   on staging environments passed, and the problem didn't even show up in canary deployments. Ideally, we\n   would have caught this issue with our automated test suite. One way to test whether compaction causes problems\n   is by using `GC.verify_compaction_references` (see the sketch after this list) – this is a rather crude tool because it requires\n   keeping two copies of the Ruby heap, which can be prohibitively expensive in terms of memory use. We\n   have therefore not yet decided how to approach this.\n1. **Improve our ability to roll out system configuration gradually.** Puma is part of our core infrastructure,\n   since it sits in the path of every web request, which makes it especially risky to experiment with Puma\n   configuration. GitLab already supports [feature flags](https://docs.gitlab.com/ee/development/feature_flags/index.html)\n   to allow developers to roll out product changes gradually, but it presents us with a catch-22 when\n   making changes at the infrastructure level, because to query the state of a feature flag, the infrastructure\n   needs to already be up and running. It would be ideal to have a similar mechanism for system configuration, [which we are currently exploring](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/154).\n\n
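One way `GC.verify_compaction_references` could be exercised is sketched below. This is a hypothetical illustration rather than anything from GitLab's pipelines; the call compacts the heap and verifies that references were updated, so a mis-marking C-extension should crash here instead of in production:\n\n```shell\n# Hypothetical smoke test (MRI 2.7+): load the gems under test, then force a\n# verified compaction so that mis-marked objects surface as crashes in CI.\n$ ruby -e '\n  require \"hamlit\"                 # a gem that ships a C-extension\n  GC.verify_compaction_references  # compact the heap and verify references\n  puts \"heap survived compaction\"\n'\n```\n\n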
While performance is a major focus for us at the moment, it must not compromise availability.\nWe will continue to monitor developments in the Ruby community around compaction support, but decided not to\nuse it in production at this point in time since the gains don't appear to outweigh the risks.\n",[1286,704,915],{"slug":4090,"featured":6,"template":678},"puma-nakayoshi-fork-and-compaction","content:en-us:blog:puma-nakayoshi-fork-and-compaction.yml","Puma Nakayoshi Fork And Compaction","en-us/blog/puma-nakayoshi-fork-and-compaction.yml","en-us/blog/puma-nakayoshi-fork-and-compaction",{"_path":4096,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4097,"content":4103,"config":4109,"_id":4111,"_type":16,"title":4112,"_source":17,"_file":4113,"_stem":4114,"_extension":20},"/en-us/blog/gitops-done-3-ways",{"title":4098,"description":4099,"ogTitle":4098,"ogDescription":4099,"noIndex":6,"ogImage":4100,"ogUrl":4101,"ogSiteName":692,"ogType":693,"canonicalUrls":4101,"schema":4102},"3 Ways to approach GitOps","Learn about how GitLab users can employ GitOps to cover both Kubernetes and non-Kubernetes environments","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669635/Blog/Hero%20Images/gitops-cover.jpg","https://about.gitlab.com/blog/gitops-done-3-ways","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Ways to approach GitOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Saumya Upadhyaya\"},{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-04-27\",\n      }",{"title":4098,"description":4099,"authors":4104,"heroImage":4100,"date":4106,"body":4107,"category":14,"tags":4108},[4105,1020],"Saumya Upadhyaya","2021-04-27","\n\nThe term [\"GitOps\"](/topics/gitops/) first emerged in the Kubernetes community as a way for organizations to enable Ops teams to move at the pace of application development. With improved automation and less risk, GitOps is quickly becoming the workflow of choice for infrastructure automation.\n\nAt GitLab, the approach to GitOps goes beyond Kubernetes. Before the buzz around GitOps picked up in the DevOps community, GitLab users and customers were applying GitOps principles to all types of infrastructure, including physical servers, virtual machines, containers, and Kubernetes clusters ([multicloud](/topics/multicloud/) and on-premise).\n\n## What is GitOps?\n\nThere are two main [approaches to GitOps](https://www.gitops.tech/), a push-based approach and a pull-based approach.\n\n- *Push-based approach*: A CI/CD tool pushes the changes to the environment. Applying GitOps via push is consistent with the approach used for application deployment. In this case, deployment targets for a push-based approach are not limited to Kubernetes.\n![push based deployment](https://about.gitlab.com/images/blogimages/gitops-push.png){: .shadow.medium.center}\nHow the push-based approach works for GitOps.\n{: .note.text-center}\n\n- *Pull-based approach*: An agent installed in a cluster pulls changes whenever there is a deviation from the desired configuration. 
In the pull-based approach, deployment targets are limited to Kubernetes and an agent must be installed in each Kubernetes cluster.\n![pull based deployment](https://about.gitlab.com/images/blogimages/gitops-pull.png){: .shadow.medium.center}\nHow the pull-based approach works for GitOps.\n{: .note.text-center}\n\n## How to employ GitOps principles using GitLab\n\nGitLab supports both of the approaches mentioned above, which can be used with and without a Kubernetes agent. Along with the [recently introduced Kubernetes agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/), GitLab supports GitOps principles across all types of deployment targets and environments: the single application for infrastructure code, configurations using CI/CD for automation, and merge requests for collaboration and controls.\n\nBelow we unpack three methods for applying GitOps principles using GitLab technology.\n\n### Push using manually configured CI/CD release targets\n\nThe infrastructure configurations are stored in Git. The user sets up the [supported deployment targets](/install/) and uses the standard CI/CD workflow to push infrastructure changes. To ensure the desired state in the repository is consistent with the environment, CI/CD will need to run on a regular schedule to identify drift and reconcile as required. Manual intervention may be required at times to deal with failed pipelines. Many GitLab users have been using this approach to push infrastructure changes to their test, staging, and production environments.\n\nThe manual push approach is ideal for both Kubernetes and supported non-Kubernetes environments, such as embedded systems, on-premise servers, mainframes, virtual machines, or FaaS offerings.\n\n### Push using Terraform\n\nIn this approach, an out-of-the-box [integration with Terraform](https://docs.gitlab.com/ee/user/infrastructure/) helps Terraform users seamlessly implement GitOps workflows using GitLab. Terraform manifests are stored in the Git repository where users can collaborate on changes within the merge requests. The Terraform plan reports can be displayed within the merge requests and the Terraform state can be stored using the GitLab-managed Terraform state backend. Everything is integrated into GitLab, which spares users from performing these tasks via third-party tools or integrations.\n\nThe push approach is ideal for both Kubernetes and non-Kubernetes deployment targets that are supported by Terraform.\n\n### Pull using a Kubernetes agent\n\nIn fall 2020, GitLab [introduced a Kubernetes agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/) that initiates a secure web-socket connection from a Kubernetes cluster to a GitLab instance. There is a GitLab server component that polls for any repository changes on the server and informs the agent when there is a deviation between the desired state and the cluster environment. This process helps minimize the load on the cluster and network. Whenever drift is detected, the agent pulls the latest configurations from the Git repository and updates the environment accordingly. This GitOps approach requires the Kubernetes agent to be installed on every Kubernetes cluster, which can be done with ease as the GitLab Agent for Kubernetes uses GitOps principles to install and update the agent as required. 
This GitOps method is ideal for Kubernetes environments only.\n\n![kubernetes agent](https://about.gitlab.com/images/blogimages/gitops-agent.png){: .shadow.medium.center}\nInside the pull-based approach using a Kubernetes agent.\n{: .note.text-center}\n\n### Up next: Push using a Kubernetes agent\n\nGitLab also aims to support GitOps by using a push approach with a Kubernetes agent. The push-based approach using a manually configured Kubernetes target attaches a Kubernetes cluster to GitLab through a certificate exchange. This approach leverages the CI/CD workflow for infrastructure automation and is fairly straightforward, but it also introduces risk by opening up a firewall and using cluster admin rights for cluster integration. To overcome these challenges while leveraging the CI/CD workflow, the [push-based approach using the Kubernetes agent](https://gitlab.com/groups/gitlab-org/-/epics/5528) aims to reuse the web-socket interface to establish a secure connection between GitLab and the Kubernetes cluster, allowing GitLab CI/CD to securely push changes over this interface. When available, this approach would also provide a migration path for users who are currently setting up the Kubernetes integration using a certificate exchange.\n\nThis upcoming approach is ideal for Kubernetes environments only. When available, it can be used in conjunction with the pull-based approach to optimize the GitOps workflow.\n\n## Accelerate the SDLC with GitOps principles\n\nWhether you are using physical servers, virtual machines, containers, or Kubernetes, on-premise or in the cloud, GitLab applies GitOps principles in a variety of ways to meet your team wherever it is. GitLab supports many different options because we understand the typical organization has a mixed IT landscape, with various heterogeneous technologies in a number of different environments.\n\n***What’s your preferred approach to GitOps?*** Drop us a comment.\n\n## Learn more about GitOps at GitLab\n\nRead on to explore how GitLab works with different technologies to deliver a GitOps solution for every company at every stage.\n\n* ***Blog***: [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n* ***Webcast***: [GitLab and HashiCorp - A holistic guide to GitOps and the Cloud Operating Model](/webcast/gitlab-hashicorp-gitops/)\n* ***Testimonial***: [Shaping a financial service’s cloud strategy using GitLab and Terraform](https://www.youtube.com/watch?v=2LF3eOoGV_o&list=PLFGfElNsQthb4FD4y1UyEzi2ktSeIzLxj&index=6)\n\nCover image by [Rodolfo Cuadros](https://unsplash.com/@rocua18) on [Unsplash](https://unsplash.com/photos/JKzgp6vhJ8M)\n{: .note}\n",[535,937,1002,873],{"slug":4110,"featured":6,"template":678},"gitops-done-3-ways","content:en-us:blog:gitops-done-3-ways.yml","Gitops Done 3 Ways","en-us/blog/gitops-done-3-ways.yml","en-us/blog/gitops-done-3-ways",{"_path":4116,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4117,"content":4123,"config":4129,"_id":4131,"_type":16,"title":4132,"_source":17,"_file":4133,"_stem":4134,"_extension":20},"/en-us/blog/devops-workflows-json-format-jq-ci-cd-lint",{"title":4118,"description":4119,"ogTitle":4118,"ogDescription":4119,"noIndex":6,"ogImage":4120,"ogUrl":4121,"ogSiteName":692,"ogType":693,"canonicalUrls":4121,"schema":4122},"JSON formatting and CI/CD linting tips for DevOps workflows","Learn how to filter in JSON data structures and interact with the REST API. 
Use the GitLab API to lint your CI/CD configuration and dive into Git hooks to speed up your workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681979/Blog/Hero%20Images/gert-boers-unsplash.jpg","https://about.gitlab.com/blog/devops-workflows-json-format-jq-ci-cd-lint","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tips for productive DevOps workflows: JSON formatting with jq and CI/CD linting automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-04-21\",\n      }",{"title":4124,"description":4119,"authors":4125,"heroImage":4120,"date":4126,"body":4127,"category":14,"tags":4128},"Tips for productive DevOps workflows: JSON formatting with jq and CI/CD linting automation",[1504],"2021-04-21","\n\n## What is JSON linting?\n\nTo understand JSON linting, let’s quickly break down the two concepts of JSON and linting. \n\n***JSON*** is an acronym for JavaScript Object Notation, which is a lightweight, text-based, open standard format designed specifically for representing structured data based on the JavaScript object syntax. It is most commonly used for transmitting data in web applications. It parses data faster than XML and is easy for humans to read and write.\n\n***Linting*** is a process that automatically checks and analyzes static source code for programming and stylistic errors, bugs and suspicious constructs. \n\nJSON has become popular because it is human-readable and doesn’t require a complete markup structure like XML. It is easy to analyze into logical syntactic components, especially in JavaScript. JSON libraries are also available for most programming languages.\n\n### Benefits of JSON linting\n\nFinding an error in JSON code can be challenging and time-consuming. The best way to find and correct errors while simultaneously saving time is to use a linting tool. When JSON code is copied and pasted into a linting editor, the linter validates and reformats it. It is easy to use and supports a wide range of browsers, so applications developed with JSON don’t require a lot of effort to make them browser-compatible.\n\nJSON linting is an efficient way to reduce errors, and it improves the overall quality of the JSON code. This can help accelerate development and reduce costs because errors are discovered earlier.\n\n### Some common JSON linting errors\n\nIn instances where a JSON transaction fails, the error information is conveyed to the user by the API gateway. By default, the API gateway returns a very basic fault to the client when a message filter has failed.\n\nOne common JSON linting error is parsing. A “parse: unexpected character” error occurs when passing a value that is not a valid JSON string to the `JSON.parse` method, for example, a native JavaScript object. To solve the error, make sure to only pass valid JSON strings to `JSON.parse`.\n\nOther common errors include NULL or inaccurate data, using the wrong data type for a column, using the wrong extension for JSON files, and not ensuring every row in the JSON table is in the JSON format.\n\n### How to fix JSON linting errors\n\nIf you encounter a NULL or inaccurate data error in parsing, the first step is to make sure you use the right data type per column. For example, in the case of “age,” use 12 instead of twelve.
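\n\nNote that a plain syntax check will not catch this class of error. Here is a small, hypothetical illustration using Python’s built-in JSON module as the linter:\n\n```shell\n# \"twelve\" is syntactically valid JSON, so a pure linter accepts it, even\n# though the column should be numeric. Type checks need a separate step.\n$ echo '{\"age\": \"twelve\"}' | python -m json.tool\n{\n    \"age\": \"twelve\"\n}\n```\n\n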
Also make sure you are using the right extension for JSON files. When using a compressed JSON file, it must end with “.json” followed by the extension of the compression format, such as “.gz”.\n\nNext, make sure the JSON format is used for every row in the JSON table. Create a table with a delimiter that is not in the input files. Then, run a query that returns the name of the file, the row details, and the file path for the null JSON rows.\n\nSometimes you may find files that are not your source code files, but ones generated by the system when compiling your project. In that instance, when the file has a `.js` extension, ESLint needs to exclude that file when searching for errors. One method of doing this is by adding `ignorePatterns` to the `.eslintrc.json` file, either after or before the `rules` key:\n\n\"ignorePatterns\": [\"temp.js\", \"**/vendor/*.js\"],\n\"rules\": { ... }\n\nAlternatively, you can create a separate file named `.eslintignore` and list the files to be excluded, as shown below:\n\n**/*.js\n\nIf you opt to correct instead of ignore, look for the error code in the last column. Correct all the errors in one file, rerun `npx eslint . >errfile`, and ensure all the errors of that type are cleared. Then look for the next error code and repeat the procedure until all errors are cleared.\n\nOf course, there will be instances when you won’t understand an error, so in that case, open [https://eslint.org/docs/user-guide/getting-started](https://eslint.org/docs/user-guide/getting-started) and type the error code in the ‘Search’ field on the top of the document. There you will find very detailed instructions as to why that error is raised and how to fix it.\n\nFinally, you can forcibly fix errors automatically while generating the error list using `npx eslint . --fix`.\n\nThis is not recommended until you become more well-versed with lint errors and how to fix them. Also, you should keep a backup of the files you are linting because while fixing errors, certain code may get overwritten, which could cause your program to fail.\n\n## JSON linting best practices\n\nHere are some tips for helping your consumers use your output:\n\nFirst, always enclose the **key: value** pair within **double quotes**. It may seem convenient to generate output with single quotes, but JSON parsers don’t accept JSON objects with single quotes.\n\nFor numerical values, quotes are optional, but it is a good idea to enclose them in double quotes.\n\nNext, don’t ever use hyphens in your key fields because they can break Python and Scala parsers. Instead, use underscores (`_`). \n\nIt’s a good idea to always create a root element, especially when you’re creating a complicated JSON document.\n\n\nModern web applications come with a REST API which returns JSON. The format needs to be parsed, and often feeds into scripts and service daemons polling the API for automation.\n\nStarting with a new REST API and its endpoints can often be overwhelming. Documentation may suggest looking into a set of SDKs and libraries for various languages, or instruct you to use `curl` or `wget` on the CLI to send a request. Both CLI tools come with a variety of parameters which help to download and print the response string, for example in JSON format.\n\nThe response string retrieved from `curl` may get long and confusing. It can require parsing the JSON format and filtering for a smaller subset of results. This helps with viewing the results on the CLI, and minimizes the data to process in scripts. 
The following example retrieves all projects from GitLab and returns a paginated result set with the first 20 projects:\n\n```shell\n$ curl \"https://gitlab.com/api/v4/projects\"\n```\n\n![Raw JSON as API response](https://about.gitlab.com/images/blogimages/devops-workflows-json-format-jq-ci-cd-lint/gitlab_api_response_raw_json.png){: .shadow}\n\nThe [GitLab REST API documentation](https://docs.gitlab.com/ee/api/#how-to-use-the-api) guides you through the first steps with error handling and authentication. In this blog post, we will be using the [Personal Access Token](https://docs.gitlab.com/ee/api/#personalproject-access-tokens) as the authentication method. Alternatively, you can use [project access tokens](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html) for [automated authentication](https://docs.gitlab.com/ee/api/#authentication) that avoids the use of personal credentials.\n\n### REST API authentication\n\nSince not all endpoints are accessible with anonymous access, some requests require authentication. Try fetching user profile data with this request:\n\n```shell\n$ curl \"https://gitlab.com/api/v4/user\"\n{\"message\":\"401 Unauthorized\"}\n```\n\nThe API request against the `/user` endpoint requires passing the personal access token into the request, for example, as a request header. To avoid exposing credentials on the terminal, you can export the token and its value into the user's environment. You can automate the variable export with ZSH and the [.env plugin](https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/dotenv) in your shell environment. You can also source the `.env` once in the existing shell environment.\n\n```shell\n$ vim ~/.env\n\nexport GITLAB_TOKEN=\"...\"\n\n$ source ~/.env\n```\n\nScripts and commands being run in your shell environment can reference the `$GITLAB_TOKEN` variable. Try querying the user API endpoint again, adding the authorization header to the request:\n\n```shell\n$ curl -H \"Authorization: Bearer $GITLAB_TOKEN\" \"https://gitlab.com/api/v4/user\"\n```\n\nA reminder that only administrators can see the attributes of all users, and individuals can only see their own user profile – for example, `email` is hidden from the public domain.\n\n### How to request responses in JSON\n\nThe [GitLab API provides many resources](https://docs.gitlab.com/ee/api/api_resources.html) and URL endpoints. You can manage almost anything with the API that you’d otherwise configure using the graphical user interface.\n\nAfter sending the [API request](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_message), the [response message](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Response_message) contains the body as a string, for example as a [JSON content type](https://docs.gitlab.com/ee/api/#content-type). `curl` can provide more information about the response headers, which is helpful for debugging. 
Multiple verbose levels enable the full debug output with `-vvv`:\n\n```shell\n$ curl -vvv \"https://gitlab.com/api/v4/projects\"\n[...]\n* SSL connection using TLSv1.2 / ECDHE-RSA-CHACHA20-POLY1305\n* ALPN, server accepted to use h2\n* Server certificate:\n*  subject: CN=gitlab.com\n*  start date: Jan 21 00:00:00 2021 GMT\n*  expire date: May 11 23:59:59 2021 GMT\n*  subjectAltName: host \"gitlab.com\" matched cert's \"gitlab.com\"\n*  issuer: C=GB; ST=Greater Manchester; L=Salford; O=Sectigo Limited; CN=Sectigo RSA Domain Validation Secure Server CA\n*  SSL certificate verify ok.\n[...]\n> GET /api/v4/projects HTTP/2\n> Host: gitlab.com\n> User-Agent: curl/7.64.1\n> Accept: */*\n[...]\n\u003C HTTP/2 200\n\u003C date: Mon, 19 Apr 2021 11:25:31 GMT\n\u003C content-type: application/json\n[...]\n[{\"id\":25993690,\"description\":\"project for adding issues\",\"name\":\"project-for-issues-1e1b6d5f938fb240\",\"name_with_namespace\":\"gitlab-qa-sandbox-group / qa-test-2021-04-19-11-13-01-d7d873fd43cd34b6 / project-for-issues-1e1b6d5f938fb240\",\"path\":\"project-for-issues-1e1b6d5f938fb240\",\"path_with_namespace\":\"gitlab-qa-sandbox-group/qa-test-2021-04-19-11-13-01-d7d873fd43cd34b6/project-for-issues-1e1b6d5f938fb240\"\n\n[... JSON content ...]\n\n\"avatar_url\":null,\"web_url\":\"https://gitlab.com/groups/gitlab-qa-sandbox-group/qa-test-2021-04-19-11-12-56-7f3128bd0e41b92f\"}}]\n* Closing connection 0\n```\n\nThe `curl` command output provides helpful insights into TLS ciphers and versions, the request lines starting with `>`, and the response lines starting with `\u003C`. The response body string is encoded as JSON.\n\n### How to see the structure of the returned JSON\n\nTo get a quick look at the structure of the returned JSON file, try these tips:\n\n* Square brackets `[ … ]` enclose an array.\n* Curly brackets `{ … }` enclose a [dictionary](https://en.wikipedia.org/wiki/Associative_array). Dictionaries are also called associative arrays, maps, etc.\n* `\"key\": value` indicates a key-value pair in a dictionary, which is identified by curly brackets enclosing the key-value pairs.\n\nThe values in [JSON](https://en.wikipedia.org/wiki/JSON) consist of specific types: a string value is put in double quotes. Boolean true/false, numbers, and floating-point numbers are also present as types. If a key exists but its value is not set, REST APIs often return `null`.\n\nVerify the data structure by running \"linters\". Python's JSON module can parse and lint JSON strings. The example below is missing a closing square bracket to showcase the error:\n\n```shell\n$ echo '[{\"key\": \"broken\"}' | python -m json.tool\nExpecting object: line 1 column 19 (char 18)\n```\n\n[jq](https://stedolan.github.io/jq/) – a lightweight and flexible CLI processor – can be used as a standalone tool to parse and validate JSON data.\n\n```shell\n$ echo '[{\"key\": \"broken\"}' | jq\nparse error: Unfinished JSON term at EOF at line 2, column 0\n```\n\n[`jq` is available](https://stedolan.github.io/jq/download/) in the package managers of most operating systems.\n\n```shell\n$ brew install jq\n$ apt install jq\n$ dnf install jq\n$ zypper in jq\n$ pacman -S jq\n$ apk add jq\n```\n\n### Dive deep into JSON data structures\n\nThe true power of `jq` lies in how it can be used to parse JSON data:\n\n> `jq` is like `sed` for JSON data. 
It can be used to slice, filter, map, and transform structured data with the same ease that `sed`, `awk`, `grep` etc., let you manipulate text.\n\nThe output below shows what it looks like to run the request against the project API again, but this time with the output piped to `jq`.\n\n```shell\n$ curl \"https://gitlab.com/api/v4/projects\" | jq\n[\n  {\n    \"id\": 25994891,\n    \"description\": \"...\",\n    \"name\": \"...\",\n\n[...]\n\n    \"forks_count\": 0,\n    \"star_count\": 0,\n    \"last_activity_at\": \"2021-04-19T11:50:24.292Z\",\n    \"namespace\": {\n      \"id\": 11528141,\n      \"name\": \"...\",\n\n[...]\n\n    }\n  }\n]\n```\n\nThe first difference is the format of the JSON data structure, which is now [pretty-printed](https://en.wikipedia.org/wiki/Prettyprint). New lines and indents in data structure scopes help your eyes and allow you to identify the inner and outer data structures involved. This format is needed to determine which `jq` filters and methods you want to apply next.\n\n#### About arrays and dictionaries\n\nThe set of results from an API is often returned as a list (or \"array\") of items. An item itself can be a single value or a JSON object. The following example mimics the response from the GitLab API and creates an array of dictionaries as a nested result set.\n\n```shell\n$ vim result.json\n[\n  {\n    \"id\": 1,\n    \"name\": \"project1\"\n  },\n  {\n    \"id\": 2,\n    \"name\": \"project2\"\n  },\n  {\n    \"id\": 3,\n    \"name\": \"project-internal-dev\",\n    \"namespace\": {\n      \"name\": \"🦊\"\n    }\n  }\n]\n```\n\nUse `cat` to print the file content on stdout and pipe it into `jq`. The outer data structure is an array – use `-c '.[]'` to access and print all items.\n\n```shell\n$ cat result.json | jq -c '.[]'\n{\"id\":1,\"name\":\"project1\"}\n{\"id\":2,\"name\":\"project2\"}\n{\"id\":3,\"name\":\"project-internal-dev\",\"namespace\":{\"name\":\"🦊\"}}\n```\n\n### How to filter data structures with `jq`\n\nFilter items by passing `| select (...)` to `jq`. The filter takes an expression as a comparator condition. When the item matches the condition, it is returned to the caller.\n\nUse the dot indexer `.` to access dictionary keys and their values. Try to filter for all items where the name is `project2`:\n\n```shell\n$ cat result.json | jq -c '.[] | select (.name == \"project2\")'\n{\"id\":2,\"name\":\"project2\"}\n```\n\nPractice this example by selecting the `id` with the value `2` instead of the `name`.\n\n#### Filter with matching a string\n\nDuring tests, you may need to match different patterns instead of knowing the full name. Think of projects that match a specific path or are located in a group where you only know the prefix. Simple string matches can be achieved with the `| contains (...)` function. It allows you to check whether the given string is inside the target string – which requires the selected attribute to be of the string type.\n\nFor a filter with the select chain, the comparison condition needs to be changed from the equal operator `==` to checking the attribute `.name` with `| contains (\"dev\")`.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.name | contains (\"dev\") )'\n{\"id\":3,\"name\":\"project-internal-dev\",\"namespace\":{\"name\":\"🦊\"}}\n```\n\n#### Filter with matching regular expressions\n\nFor advanced string pattern matching, it is recommended to use regular expressions. 
#### Filter with matching regular expressions\n\nFor advanced string pattern matching, it is recommended to use regular expressions. `jq` provides the [test function for this use case](https://stedolan.github.io/jq/manual/#RegularexpressionsPCRE). Try to filter for all projects which end with a number, represented by `\\d+`. Note that the backslash `\\` needs to be escaped as `\\\\` for shell execution. `^` anchors the match at the beginning of the string, `$` at the end.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.name | test (\"^project\\\\d+$\") )'\n{\"id\":1,\"name\":\"project1\"}\n{\"id\":2,\"name\":\"project2\"}\n```\n\nTip: You can [test and build the regular expression with regex101](https://regex101.com/) before test-driving it with `jq`.\n\n#### Access nested values\n\nKey-value pairs in a dictionary may themselves have a dictionary or array as a value. `jq` filters need to take this into account when filtering or transforming the result. The example data structure provides `project-internal-dev`, which has the key `namespace` with a dictionary as its value.\n\n```shell\n  {\n    \"id\": 3,\n    \"name\": \"project-internal-dev\",\n    \"namespace\": {\n      \"name\": \"🦊\"\n    }\n  }\n```\n\n`jq` allows the user to use the empty [array and dictionary values](https://stedolan.github.io/jq/manual/#TypesandValues) `[]` and `{}` in select chains with greater-than and less-than comparisons. Comparing the `namespace` attribute against an empty dictionary with `>={}` selects all items where it holds a (non-empty) dictionary, while `\u003C={}` selects all items where it is empty or `null`.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.namespace >={} )'\n{\"id\":3,\"name\":\"project-internal-dev\",\"namespace\":{\"name\":\"🦊\"}}\n\n$ cat result.json | jq -c '.[] | select (.namespace \u003C={} )'\n{\"id\":1,\"name\":\"project1\"}\n{\"id\":2,\"name\":\"project2\"}\n```\n\nThese methods can be used to access the name attribute of the namespace, but only if the namespace contains values. Tip: You can chain multiple `jq` calls by piping the result into another `jq` call. `.name` is a subkey of the primary `.namespace` key.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.namespace >={} )' | jq -c '.namespace.name'\n\"🦊\"\n```\n\nThe additional select command with non-empty namespaces ensures that only initialized values for `.namespace.name` are returned. This is a safety check that avoids receiving `null` values in the result, which you would otherwise need to filter out again.\n\n```shell\n$ cat result.json | jq -c '.[]' | jq -c '.namespace.name'\nnull\nnull\n\"🦊\"\n```\n\nBy using the additional check with `| select (.namespace >={} )`, you only get the expected results and do not have to filter out empty `null` values.\n\n### How to explore the GitLab endpoint response\n\nSave the result from the API projects call and retry the examples above with `jq`.\n\n```shell\n$ curl \"https://gitlab.com/api/v4/projects\" -o result.json >/dev/null 2>&1\n```\n\n
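With the response stored on disk, two more hedged sketches: `length` counts the items in the returned array (the projects API paginates, returning 20 projects per page by default, so the count shown is illustrative), and the `-r` flag prints raw strings without surrounding quotes.\n\n```shell\n$ jq 'length' result.json\n20\n$ jq -r '.[0].path_with_namespace' result.json\ngitlab-qa-sandbox-group/qa-test-2021-04-19-11-13-01-d7d873fd43cd34b6/project-for-issues-1e1b6d5f938fb240\n```\n\n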
### Validate CI/CD YAML with `jq` for Git hooks\n\nWhile writing this blog post, I learned that you can [escape and encode YAML into JSON with `jq`](https://docs.gitlab.com/ee/api/lint.html#escape-yaml-for-json-encoding). This trick comes in handy when automating YAML linting on the CLI, for example as a Git pre-commit hook.\n\nLet’s take a look at the simplest way to test GitLab CI/CD from our [community meetup workshops](https://gitlab.com/gitlab-de/swiss-meetup-2021-jan#resources). A common mistake in the first steps of the process is a missing two-space indent, or missing whitespace between the dash and the following command. The following examples use `.gitlab-ci.error.yml` as a filename to showcase errors and `.gitlab-ci.main.yml` for working examples.\n\n```shell\n$ vim .gitlab-ci.error.yml\n\nimage: alpine:latest\n\ntest:\nscript:\n  -exit 1\n```\n\nCommitting the change and waiting for the CI/CD pipeline to validate at runtime can be time-consuming. The [GitLab API provides a resource endpoint /ci/lint](https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration). A POST request with JSON-encoded YAML content will return a linting result faster.\n\n#### Parse CI/CD YAML into JSON with jq\n\nYou can use `jq` to read the raw YAML string and encode it as a JSON string:\n\n```shell\n$ jq --raw-input --slurp \u003C .gitlab-ci.error.yml\n\"image: alpine:latest\\n\\ntest:\\nscript:\\n  -exit 1\\n\"\n```\n\nThe `/ci/lint` API endpoint requires a JSON dictionary with `content` as the key and the raw YAML string as the value. You can use `jq` to build this input with the `--arg` parser:\n\n```shell\n$ jq --null-input --arg yaml \"$(\u003C.gitlab-ci.error.yml)\" '.content=$yaml'\n{\n  \"content\": \"image: alpine:latest\\n\\ntest:\\nscript:\\n  -exit 1\"\n}\n```\n\n#### Send POST request to /ci/lint\n\nThe next building block is to [send a POST request to the /ci/lint endpoint](https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration). The request needs to specify the `Content-Type` header for the body. Using the pipe `|` character, the JSON-encoded YAML configuration is fed into the `curl` command.\n\n```shell\n$ jq --null-input --arg yaml \"$(\u003C.gitlab-ci.error.yml)\" '.content=$yaml' \\\n| curl \"https://gitlab.com/api/v4/ci/lint?include_merged_yaml=true\" \\\n--header 'Content-Type: application/json' --data @-\n{\"status\":\"invalid\",\"errors\":[\"jobs test config should implement a script: or a trigger: keyword\",\"jobs script config should implement a script: or a trigger: keyword\",\"jobs config should contain at least one visible job\"],\"warnings\":[],\"merged_yaml\":\"",[832,894,726],{"slug":4130,"featured":6,"template":678},"devops-workflows-json-format-jq-ci-cd-lint","content:en-us:blog:devops-workflows-json-format-jq-ci-cd-lint.yml","Devops Workflows Json Format Jq Ci Cd Lint","en-us/blog/devops-workflows-json-format-jq-ci-cd-lint.yml","en-us/blog/devops-workflows-json-format-jq-ci-cd-lint",{"_path":4136,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4137,"content":4143,"config":4148,"_id":4150,"_type":16,"title":4151,"_source":17,"_file":4152,"_stem":4153,"_extension":20},"/en-us/blog/gitlab-jira-integration-selfmanaged",{"title":4138,"description":4139,"ogTitle":4138,"ogDescription":4139,"noIndex":6,"ogImage":4140,"ogUrl":4141,"ogSiteName":692,"ogType":693,"canonicalUrls":4141,"schema":4142},"How to achieve a GitLab Jira integration","Check out how to integrate GitLab self-managed with Atlassian Jira to connect your merge requests, branches, and commits to a Jira issue.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667260/Blog/Hero%20Images/twopeasinapod.jpg","https://about.gitlab.com/blog/gitlab-jira-integration-selfmanaged","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to achieve a GitLab Jira integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-04-12\",\n      }",{"title":4138,"description":4139,"authors":4144,"heroImage":4140,"date":4145,"body":4146,"category":14,"tags":4147},[3971],"2021-04-12","\n_This is 
the second in a series of posts on GitLab Jira integration strategies. The [first post](/blog/integrating-gitlab-com-with-atlassian-jira-cloud/) explains how to integrate GitLab.com with Jira Cloud._\n\nThe advantages of a GitLab Jira integration are clear:\n\n* One GitLab project integrates with all the Jira projects in a single Jira instance. \n* Quickly navigate to Jira issues from GitLab. \n* Detect and link to Jira issues from GitLab commits and merge requests. \n* Log GitLab events in the associated Jira issue. \n* Automatically close (transition) Jira issues with GitLab commits and merge requests.\n\nHere's a step-by-step guide to everything you need to know to achieve a GitLab Jira integration.\n\n## Pre-configuration\n\nAs you approach connecting your GitLab project to Jira, you can choose from two options, depending on which best fits your company or organization's needs. You can either:\n\n* Use a service template by having a GitLab administrator provide default values for configuring integrations at the project level. When enabled, the defaults are applied to all projects that do not already have the integration enabled or do not otherwise have custom values saved. The Jira integration values are all pre-filled on each project's configuration page. If you disable the template, these values no longer appear as defaults, while any values already saved for an integration remain unchanged.\n\n* Configure the integration at the level of a specific project, with custom values that apply to that project and that project alone.\n\nIt should be noted that each GitLab project can be configured to connect to an entire Jira instance. That means one GitLab project can interact with all Jira projects in that instance, once configured. Therefore, you will not have to explicitly associate a GitLab project with any single Jira project.\n\nGitLab offers several different options that allow you to integrate Jira in a way that best fits you and your team's needs, based on how you’ve set up your Jira software. Let’s take a deeper look into how to set up each of these available options.\n\n## How to configure Jira\n\nThe first step in setting up your GitLab Jira integration is getting your Jira configuration in order.\n\n**Jira Server** supports basic authentication. When connecting, a username and password are required. Note that connecting to Jira Server via CAS is not possible. Set up a user in Jira Server first and then proceed to Configuring GitLab.\n\n**Jira Cloud** supports authentication through an API token; to begin the process, you need to create one within Jira. When connecting to Jira Cloud, an email and API token are required. Set up a user in Jira Cloud first and then proceed to Configuring GitLab. \n\nCreate an API token here: https://id.atlassian.com/manage-profile/security/api-tokens\n\n* Log in to id.atlassian.com with your email address. It is important that the user associated with this email address has write access to projects in Jira.\n\n* Click Create API token.\n\n![Create API Token in Jira](https://about.gitlab.com/images/blogimages/atlassianjira/createjiratoken.png){: .shadow.medium.center}\nJira API token creation\n{: .note.text-center}\n\n* Click Copy, or click View and write down the new API token. 
It is required when configuring GitLab.\n\n![Copy API Token](https://about.gitlab.com/images/blogimages/atlassianjira/copyjiratoken.png){: .shadow.medium.center}\nJira API token copy to clipboard\n{: .note.text-center}\n\n## How to configure GitLab\n\nAs mentioned above, you can begin setting up the Jira integration either by using a service template, which pre-fills the Jira values as defaults for all GitLab projects, or at an individual project level.\n\nTo set up a service template:\n\n* 1a. Navigate to the Admin Area > Service Templates and choose the Jira service template.\n\n![GitLab Service Templates](https://about.gitlab.com/images/blogimages/atlassianjira/GitLabServiceTemplates.png){: .shadow.medium.center}\nGitLab Service Templates\n{: .note.text-center}\n\n* 2a. For each project, you will still need to configure the issue tracking URLs by replacing :issues_tracker_id in the above screenshot with the ID used by your external issue tracker.\n\n![Issue Tracker ID](https://about.gitlab.com/images/blogimages/atlassianjira/issuetrackerid.png){: .shadow.medium.center}\nIssue Tracker ID\n{: .note.text-center}\n\nTo set up an individual project integration:\n\n* 1b. To enable the Jira integration in a project, navigate to the Integrations page and click the Jira service.\n\n![Enable Jira Integration](https://about.gitlab.com/images/blogimages/atlassianjira/enablejiraintegration.png){: .shadow.medium.center}\nEnable Jira Integration\n{: .note.text-center}\n\n* 2b. Select a Trigger action. This determines whether a mention of a Jira issue in GitLab commits, merge requests, or both, should link the Jira issue back to that source commit/MR and transition the Jira issue, if indicated.\n\n![Select Trigger Action](https://about.gitlab.com/images/blogimages/atlassianjira/selecttriggeraction.png){: .shadow.medium.center}\nSelect Trigger Action\n{: .note.text-center}\n\n* 3b. To include a comment on the Jira issue when the above reference is made in GitLab, check Enable comments.\n\n* 3c. Enter the further details on the page as described in the following table:\n\n| Field | Description |\n|-------|-------------|\n| Web URL | The base URL to the Jira instance web interface which is being linked to this GitLab project. E.g., https://jira.example.com. |\n| Jira API URL | The base URL to the Jira instance API. The Web URL value will be used if not set. E.g., https://jira-api.example.com. Leave this field blank (or use the same value as Web URL) if using Jira Cloud. |\n| Username or Email | Use a username for Jira Server or an email for Jira Cloud. |\n| Transition ID | Required for closing Jira issues via commits or merge requests. This is the ID of a transition in Jira that moves issues to a desired state. If you insert multiple transition IDs separated by `,` or `;`, the issue is moved to each state, one after another, using the given order. (See below for obtaining a transition ID.) |\n\nTo obtain a transition ID, use the API with a request like https://yourcompany.atlassian.net/rest/api/2/issue/ISSUE-123/transitions, using an issue that is in the appropriate “open” state.\n\n*Note: The transition ID may vary between workflows (e.g., bug vs. story), even if the status you are changing to is the same.*\n\n
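A minimal, hedged sketch of such a request – the domain, the issue key ISSUE-123, and the credentials are placeholders; Jira Cloud accepts your email address and API token via basic authentication, and piping to `jq` trims the response down to the interesting fields:\n\n```shell\n$ curl --silent --user \"you@example.com:YOUR_API_TOKEN\" \\\n  \"https://yourcompany.atlassian.net/rest/api/2/issue/ISSUE-123/transitions\" \\\n| jq '.transitions[] | {id, name}'\n```\n\n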
![Transition ID](https://about.gitlab.com/images/blogimages/atlassianjira/transitionid.png){: .shadow.medium.center}\nTransition ID\n{: .note.text-center}\n\nYour GitLab project can now interact with all Jira projects in your instance, and the project now displays a Jira link that opens the Jira project.\n\nWhen you have configured all settings, click **Test settings and save changes.**\n\n![Test settings and save changes](https://about.gitlab.com/images/blogimages/atlassianjira/testsettingsandsavechanges.png){: .shadow.medium.center}\nTest settings and save changes\n{: .note.text-center}\n\nIt should be noted that you can only display issues from a single Jira project within a given GitLab project.\n\nThe integration is now **activated:**\n\n![Active Jira Integration](https://about.gitlab.com/images/blogimages/atlassianjira/activeintegration.png){: .shadow.medium.center}\nActive Jira Integration\n{: .note.text-center}\n\n## Jira Issues\n\nBy now you should have [configured Jira](https://docs.gitlab.com/ee/integration/jira/index.html#configuring-jira) and enabled the [Jira service in GitLab](https://docs.gitlab.com/ee/integration/jira/index.html#configuring-gitlab). If everything is set up correctly, you should be able to reference and close Jira issues by just mentioning their ID in GitLab commits and merge requests.\n\nJira issue IDs must be formatted in uppercase for the integration to work.\n\n### 1. How to reference Jira issues\n\nWhen a GitLab project has the Jira issue tracker configured and enabled, mentioning a Jira issue in GitLab automatically adds a comment to that Jira issue with a link back to GitLab. This means that comments in merge requests and commits referencing an issue, e.g., PROJECT-7, will add a comment to the Jira issue in the format:\n\nUSER mentioned this issue in RESOURCE_NAME of [PROJECT_NAME|LINK_TO_COMMENT]:\nENTITY_TITLE\n\n* USER: The user that mentioned the issue, linked to their GitLab profile.\n* LINK_TO_THE_COMMENT: Link to the origin of the mention, with the name of the entity where the Jira issue was mentioned.\n* RESOURCE_NAME: The kind of resource which referenced the issue – a commit or a merge request.\n* PROJECT_NAME: The GitLab project name.\n* ENTITY_TITLE: The merge request title or the first line of the commit message.\n\n![Reference Jira issues](https://about.gitlab.com/images/blogimages/atlassianjira/issuelinks.png){: .shadow.medium.center}\nReference Jira issues\n{: .note.text-center}\n\nFor example, the following commit will reference the Jira issue with PROJECT-1 as its ID:\n\ngit commit -m \"PROJECT-1 Fix spelling and grammar\"\n\n### 2. How to close Jira issues\n\nJira issues can be closed directly from GitLab when you push code by using trigger words in commits and merge requests. When a commit which contains a trigger word followed by the Jira issue ID in the commit message is pushed, GitLab will add a comment in the mentioned Jira issue and immediately close it (provided the transition ID was set up correctly).\n\nThere are currently three trigger words, and you can use any of them to achieve the same goal:\n* Resolves PROJECT-1\n* Closes PROJECT-1\n* Fixes PROJECT-1\n\nwhere PROJECT-1 is the ID of the Jira issue.\n\n
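A minimal sketch of a closing commit – the message is illustrative, and the default branch is assumed to be main:\n\n```shell\n$ git commit -m \"Fixes PROJECT-1: correct the spelling and grammar\"\n$ git push origin main\n```\n\n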
Notes:\n\n* Only commits and merges into the project’s default branch (usually main or master) will close an issue in Jira. You can change your project's default branch under project settings.\n\n* The Jira issue will not be transitioned if it already has a resolution.\n\nLet’s consider the following example:\n\n* For the project named PROJECT in Jira, we implemented a new feature and created a merge request in GitLab.\n* This feature was requested in Jira issue PROJECT-7, and the merge request in GitLab contains the improvement.\n* In the merge request description we use the issue closing trigger Closes PROJECT-7.\n* Once the merge request is merged, the Jira issue will be automatically closed with a comment and an associated link to the commit that resolved the issue.\n\nIn the following screenshot, you can see what the link references to the Jira issue look like.\n\n![GitLab link references](https://about.gitlab.com/images/blogimages/atlassianjira/linkreferences.png){: .shadow.medium.center}\nGitLab link references\n{: .note.text-center}\n\nOnce this merge request is merged, the Jira issue will be automatically closed with a link to the commit that resolved the issue.\n\n![Jira Issue auto closes when GitLab MR merges](https://about.gitlab.com/images/blogimages/atlassianjira/jiraautoclose.png){: .shadow.medium.center}\nJira Issue auto closes when GitLab MR merges\n{: .note.text-center}\n\n## Development Panel Integration Set-Up\n\n### A. Jira DVCS configuration\n\nWhen using the Jira DVCS configuration, there are several different configuration choices you can make, depending on how your Jira and GitLab instances are managed.\n\n* If you are using self-managed GitLab, make sure your GitLab instance is accessible by Jira.\n* If you’re connecting to Jira Cloud, ensure your instance is accessible through the internet.\n* If you are using Jira Server, make sure your instance is accessible however your network is set up.\n\n### B. GitLab account configuration for DVCS\n\n* In GitLab, create a new application to allow Jira to connect with your GitLab account.\nWhile signed in to the GitLab account that you want Jira to use to connect to GitLab, click your profile avatar at the top right, and then click Settings > Applications. Use the form to create a new application.\n\n* In the Name field, enter a descriptive name for the integration, such as Jira.\nFor the Redirect URI field, enter https://\u003Cgitlab.example.com>/login/oauth/callback, replacing \u003Cgitlab.example.com> with your GitLab instance domain. For example, if you are using GitLab.com, this would be https://gitlab.com/login/oauth/callback.\nNote: If using a GitLab version earlier than 11.3, the Redirect URI must be https://\u003Cgitlab.example.com>/-/jira/login/oauth/callback. If you want Jira to have access to all projects, GitLab recommends that an administrator create the application.\n\n![Admin Creates Integration](https://about.gitlab.com/images/blogimages/atlassianjira/admincreates.png){: .shadow.medium.center}\nAdmin Creates Integration\n{: .note.text-center}\n\n* Check API in the Scopes section and uncheck any other checkboxes.\n\n* Click Save application. GitLab displays the generated Application ID and Secret values. Copy these values, which you will use in Jira.\n\n
*Tip: To ensure that regular user account maintenance doesn’t impact your integration, create and use a single-purpose Jira user in GitLab.*\n\n## Jira DVCS Connector setup\n\nNote: If you’re using GitLab.com and Jira Cloud, we recommend you use the [GitLab for Jira app](https://docs.gitlab.com/ee/integration/jira/index.html), unless you have a specific need for the DVCS Connector.\n\n* Ensure you have completed the [GitLab configuration](https://docs.gitlab.com/ee/integration/jira/index.html).\n\n![Check api in Applications](https://about.gitlab.com/images/blogimages/atlassianjira/checkapi.png){: .shadow.medium.center}\nCheck api in Applications\n{: .note.text-center}\n\n![Application was created successfully](https://about.gitlab.com/images/blogimages/atlassianjira/applicationsuccessful.png){: .shadow.medium.center}\nApplication was created successfully\n{: .note.text-center}\n\n* If you’re using Jira Server, go to Settings (gear) > Applications > DVCS accounts. If you’re using Jira Cloud, go to Settings (gear) > Products > DVCS accounts.\n\n![Go to DVCS in Settings](https://about.gitlab.com/images/blogimages/atlassianjira/dvcssettings.png){: .shadow.medium.center}\nGo to DVCS in Settings\n{: .note.text-center}\n\n* Click Link GitHub Enterprise account to start creating a new integration. (We’re pretending to be GitHub in this integration, until there’s additional platform support in Jira.)\n\n![Click Link to start new integration](https://about.gitlab.com/images/blogimages/atlassianjira/dvcsaccount.png){: .shadow.medium.center}\nClick Link to start new integration\n{: .note.text-center}\n\n* Complete the form:\nSelect GitHub Enterprise for the Host field.\nIn the Team or User Account field, enter the relative path of a top-level GitLab group that you have access to, or the relative path of your personal namespace.\n\n![Add new account](https://about.gitlab.com/images/blogimages/atlassianjira/addnewaccount.png){: .shadow.medium.center}\nAdd new account\n{: .note.text-center}\n\nIn the Host URL field, enter https://\u003Cgitlab.example.com>/, replacing \u003Cgitlab.example.com> with your GitLab instance domain. For example, if you are using GitLab.com, this would be https://gitlab.com/.\n\n*Note: If using a GitLab version earlier than 11.3, the Host URL value should be https://\u003Cgitlab.example.com>/-/jira*\n\nFor the Client ID field, use the Application ID value from the previous section.\n\nFor the Client Secret field, use the Secret value from the previous section.\n\nEnsure that the rest of the checkboxes are checked.\n\n* Click Add to complete and create the integration.\nJira takes up to a few minutes to import (behind the scenes) all the commits and branches for all the projects in the GitLab group you specified in the previous step. These are refreshed every 60 minutes.\n\nIn the future, we plan on implementing real-time integration. 
If you need to refresh the data manually, you can do this from the Applications -> DVCS accounts screen where you initially set up the integration:\n\n![Refresh data manually](https://about.gitlab.com/images/blogimages/atlassianjira/refreshdata.png){: .shadow.medium.center}\nRefresh data manually\n{: .note.text-center}\n\nTo connect additional GitLab projects from other GitLab top-level groups (or personal namespaces), repeat the previous steps with additional Jira DVCS accounts.\n\nFor troubleshooting your DVCS connection, go to [GitLab Docs](https://docs.gitlab.com/ee/integration/jira/index.html) for more information.\n\n_In our next blog post we'll look at [Usage](https://docs.gitlab.com/ee/integration/jira_development_panel.html#usage)._\n\nCover image by [Mikołaj Idziak](https://unsplash.com/@mikidz) on [Unsplash](https://unsplash.com/photos/nwjRmbXbLgw).\n{: .note.text-left}\n",[873,894,937],{"slug":4149,"featured":6,"template":678},"gitlab-jira-integration-selfmanaged","content:en-us:blog:gitlab-jira-integration-selfmanaged.yml","Gitlab Jira Integration Selfmanaged","en-us/blog/gitlab-jira-integration-selfmanaged.yml","en-us/blog/gitlab-jira-integration-selfmanaged",{"_path":4155,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4156,"content":4162,"config":4167,"_id":4169,"_type":16,"title":4170,"_source":17,"_file":4171,"_stem":4172,"_extension":20},"/en-us/blog/demystifying-ci-cd-variables",{"title":4157,"description":4158,"ogTitle":4157,"ogDescription":4158,"noIndex":6,"ogImage":4159,"ogUrl":4160,"ogSiteName":692,"ogType":693,"canonicalUrls":4160,"schema":4161},"GitLab environment variables demystified","CI/CD variables are useful (and flexible) tools to control jobs and pipelines. We unpack everything you need to know about GitLab environment variables.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664679/Blog/Hero%20Images/blog-image-template-1800x945__24_.png","https://about.gitlab.com/blog/demystifying-ci-cd-variables","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab environment variables demystified\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2021-04-09\",\n      }",{"title":4157,"description":4158,"authors":4163,"heroImage":4159,"date":4164,"body":4165,"category":14,"tags":4166,"updatedDate":790},[1140],"2021-04-09","There is a lot of flexibility when it comes to defining and using variables for [CI/CD](https://about.gitlab.com/topics/ci-cd/). Variables are extremely useful for controlling jobs and pipelines, and they help you avoid hard-coding values in your `.gitlab-ci.yml` configuration file. The information in this post should weave a larger picture by bringing together all (or most) of the information around defining and handling variables, making it easier to understand the scope and capabilities. Relevant documentation is linked throughout the post.\n\nIn [GitLab CI/CD](https://docs.gitlab.com/ee/ci/), variables can be used to customize jobs by defining and storing values. When using variables there is no need to hard code values. In GitLab, CI/CD variables can be defined by going to **Settings >> CI/CD >> Variables**, or by simply defining them in the `.gitlab-ci.yml` file.\n\nVariables are useful for configuring third-party services for different deployment environments, such as `testing`, `staging`, `production`, etc. 
Modify the services attached to those environments by simply changing the variable that points to the API endpoint the services need to use. Also use variables to configure jobs, and then make them available as environment variables within the jobs when they run.\n\n![GitLab reads the .gitlab-ci.yml file to scan the referenced variable and sends the information to the GitLab Runner. The variables are exposed on and output by the runner.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_processing.jpeg)\n\n## The relationship between variables and environments\n\nSoftware development as a process includes stages to test a product before rolling it out to users. [Environments](https://docs.gitlab.com/ee/ci/environments/) are used to define what those stages look like, and they may differ between teams and organizations.\n\nOn the other hand, variables are data values that are likely to change as a result of user interaction with a product: for example, a user's age, preferences, or any input you could possibly think of that might determine their next step in the product task-flow.\n\nWe often hear the term [environment variable](https://docs.gitlab.com/ee/administration/environment_variables.html). These are variables that are defined in a given environment, but outside the application. GitLab CI/CD variables provide developers with the ability to configure values in their code. Using variables is helpful because it keeps the code flexible. GitLab CI/CD variables allow users to modify an application deployed to a certain environment without making any change to code. It is simple to run tests or even integrate third-party services by changing a configuration environment variable outside the application.\n\n## The scope of variables for CI/CD\n\n![Order of precedence for CI/CD variables: 1) Manual pipeline run, trigger and schedule pipeline variables, 2) Project level, group level, instance level protected variables, 3) Inherited CI/CD variables, 4) Job level, global yml defined variables, 5) Deployment variables, 6) Pre-defined CI/CD variables](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_precedence.jpeg)\n\n### `.gitlab-ci.yml` defined variables\n\nVariables that need to be available in the job environment can be added to GitLab. These CI/CD variables are meant to store non-sensitive project configuration, like the database URL, in the `.gitlab-ci.yml` file. Reuse this variable in multiple jobs or scripts, wherever the value is needed. If the value changes, you only need to update the variable once, and the change is reflected everywhere the variable is used.\n\n### Project CI/CD variables\n\nMoving a step above the repository-specific requirements, you can define CI/CD variables in [project settings](https://docs.gitlab.com/ee/ci/variables/#for-a-project), which makes them available to CI/CD pipelines. These are stored outside the repository (not in the `.gitlab-ci.yml` file), but are still available to use in the CI/CD configuration and scripts. Storing the variables outside the `.gitlab-ci.yml` file keeps these values limited to a project-only scope, and not saved in plain text in the project.\n\n
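Returning to the `.gitlab-ci.yml`-defined variables described above – a minimal sketch, where the `DATABASE_URL` value and the job names are illustrative placeholders – a variable defined once can be reused by every job:\n\n```shell\n$ cat .gitlab-ci.yml\nvariables:\n  DATABASE_URL: \"postgres://postgres@db/my_database\"\n\ntest:\n  script:\n    - echo \"Testing against $DATABASE_URL\"\n\ndeploy:\n  script:\n    - echo \"Deploying with $DATABASE_URL\"\n```\n\n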
### Group and instance CI/CD variables\n\nSome variables are relevant at the group level, or even the instance level, and could be useful to all projects in a group or instance. Define the variables in the [group or instance settings](https://docs.gitlab.com/ee/ci/variables/#for-a-group) so all projects within those scopes can use the variables without actually needing to know the value or having to create the variables for the lower scope. For example, a common value that needs to be updated in multiple projects can be easily managed if it stays up-to-date in a single place. Alternatively, multiple projects could use a specific password without actually needing to know the value of the password itself.\n\n## Jobs and pipelines as environments\n\nGitLab CI/CD variables, besides being used as environment variables, also work in the scope of the `.gitlab-ci.yml` configuration file to configure pipeline behavior, unrelated to any environment. The variables can be stored in the project/group/instance settings and be made available to jobs in pipelines.\n\nFor example:\n\n```yaml\njob:\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n  script:\n    - echo \"This job ran on the $CI_COMMIT_BRANCH branch.\"\n```\n\nThe variable (`$CI_COMMIT_BRANCH`) in the script section runs in the scope of the job in which it is used. This scope is the \"job environment\" – meaning, when the job starts, the GitLab runner starts up a Docker container and runs the job in that environment. The runner will make that variable (and all other predefined or custom variables) available to the job, and it can display their values in the log output if needed.\n\nBut the variable is **also** used in the `if:` section to determine when the job should run. That in itself is not an environment, which is why we call these CI/CD variables. They can be used to dynamically configure your CI/CD jobs, **as well** as be used as environment variables when the job is running.\n\n## Predefined variables\n\nA number of variables are [predefined](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) when a GitLab CI/CD pipeline starts. A user can immediately access values for things like commit, project, or pipeline details without needing to define the variables themselves.\n\n## Custom CI/CD variables\n\n![Runners can create two kinds of custom CI/CD variables: Type and File.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variable_types.jpeg)\n\nWhen creating a CI/CD variable in the settings, GitLab gives the user more configuration options for the variable. Use these extra configuration options for stricter control over more sensitive variables:\n\n**Environment scope:** If a variable only ever needs to be used in one specific environment, set it to only ever be available in that environment. For example, you can set a deploy token to only be available in the `production` environment.\n\n**Protected variables:** Similar to the environment scope, you can set a variable to be available only when the pipeline runs on a protected branch, like your default branch.\n\n**Variable type:** A few applications require configuration to be passed to them in the form of a file. If a user has an application that requires this configuration, just set the type of the variable as a \"File\". Configuring the CI/CD variable this way means that when the runner makes the variable available in the environment, it actually writes it out to a temporary file, and stores the path to the file as the value. Next, a user can pass the path to the file to any applications that need it.\n\n
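A minimal sketch of that behavior – `SERVICE_CONFIG` is a hypothetical file-type variable, `my-app` is a hypothetical consumer, and the path shown is illustrative – the job script receives a path, not the content itself:\n\n```shell\n$ echo \"$SERVICE_CONFIG\"              # the value is a path to a temporary file\n/builds/group/project.tmp/SERVICE_CONFIG\n$ my-app --config \"$SERVICE_CONFIG\"   # pass the path straight to the application\n```\n\n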
Along with the listed ways of defining and using variables, GitLab introduced a feature that generates pre-filled variables when there's a need to run a pipeline manually. Pre-filled variables reduce the chances of running into an error and make running the pipeline easier.\n\n**Masked variables:** [Masked variables](https://docs.gitlab.com/ee/ci/variables/#mask-a-cicd-variable) are CI variables that have been **hidden in job logs** to prevent the variable’s value from being displayed. \n\n**Masked and hidden variables:** Introduced in [GitLab 17.4](https://about.gitlab.com/releases/2024/09/19/gitlab-17-4-released/#hide-cicd-variable-values-in-the-ui), [masked and hidden](https://docs.gitlab.com/ee/ci/variables/#hide-a-cicd-variable) variables provide the same masking feature for job logs and **keep the value hidden** **in the Settings UI**. We do not recommend using either of these variable types for sensitive data (e.g., secrets), as they can be inadvertently exposed. \n\n## Secrets\n\nA secret is a sensitive credential that should be kept confidential. Examples of a secret include:\n\n* Passwords  \n* SSH keys  \n* Access tokens  \n* Any other types of credentials where exposure would be harmful to an organization\n\nGitLab currently enables its users to [use external secrets in CI](https://docs.gitlab.com/ee/ci/secrets/) by leveraging HashiCorp Vault, Google Cloud Secret Manager, and Azure Key Vault to securely manage keys, tokens, and other secrets at the project level. This allows users to separate these secrets from other CI/CD variables for security reasons.\n\n### GitLab Secrets Manager\n\nBesides providing support for external secrets in CI, GitLab is also working on introducing a [native solution to secrets management](https://gitlab.com/groups/gitlab-org/-/epics/10108) to securely and conveniently store secrets within GitLab. This solution will also help customers use the stored secrets in GitLab-specific components and environments, and easily manage access at the namespace (group and project) level. \n\n## Read more\n* [GitLab native secrets manager to give software supply chain security a boost](https://about.gitlab.com/blog/gitlab-native-secrets-manager-to-give-software-supply-chain-security-a-boost/)\n\n***Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.*\n",[937,749,915,832,110,726],{"slug":4168,"featured":6,"template":678},"demystifying-ci-cd-variables","content:en-us:blog:demystifying-ci-cd-variables.yml","Demystifying Ci Cd Variables","en-us/blog/demystifying-ci-cd-variables.yml","en-us/blog/demystifying-ci-cd-variables",{"_path":4174,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4175,"content":4180,"config":4186,"_id":4188,"_type":16,"title":4189,"_source":17,"_file":4190,"_stem":4191,"_extension":20},"/en-us/blog/3-debugging-tips-we-learned-from-you",{"title":4176,"description":4177,"ogTitle":4176,"ogDescription":4177,"noIndex":6,"ogImage":2478,"ogUrl":4178,"ogSiteName":692,"ogType":693,"canonicalUrls":4178,"schema":4179},"3 Debugging tips we learned from you","We asked for your most unexpected causes of bugs. Here's what we learned.","https://about.gitlab.com/blog/3-debugging-tips-we-learned-from-you","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Debugging tips we learned from you\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2021-03-31\",\n      }",{"title":4176,"description":4177,"authors":4181,"heroImage":2478,"date":4183,"body":4184,"category":14,"tags":4185},[4182],"Rebecca Dodd","2021-03-31","\n\nInfuriating, facepalm-inducing, but with an intensely satisfying payoff when (if!) you figure them out, bugs are an unavoidable part of being a developer.\n\n![Programmer debugging meme](https://about.gitlab.com/images/blogimages/debugging.png){: .medium.center}\n\u003C!-- image: https://www.reddit.com/r/ProgrammerHumor/comments/m2wy7v/20_goto_10/-->\n\nWhen senior developer evangelist [Brendan O'Leary](/company/team/#brendan) shared with us this amusing [story about a \"bug\" he solved](https://boleary.dev/blog/2021-01-27-the-purse-caper-debugging-can-be-hard.html) in a previous role, we knew we had to ask you about your most elusive bugs. Now we're sharing some of the best bug stories with you, along with some lessons.\n\nBrendan's example was in fact not a bug at all, but actually the result of an employee resting their purse on the keyboard. This is the first lesson: \n\n## Debugging tip 1: It might not be a bug at all\n\nA surprising number of \"bugs\" actually have nothing to do with code. One of the first principles of debugging is to reproduce the bug to get started. If you can't do that, it could be a sign that, er, human factors are at play. Consider this example from [@MrSimonEmms](https://twitter.com/MrSimonEmms) on Twitter:\n\n> I once spent an entire day chasing down an error because I put a backtick in – originally, I thought it was a piece of dirt on my screen.\n(This was 15+ years ago and the stacktrace wasn't even erroring in the correct file)\n\nIn fact, when we asked for your stories, [user errors](https://en.wikipedia.org/wiki/User_error) came up _a lot_:\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">customer reporting that the new software was not printing the letterhead. The letterhead is fed in from a different tray in the printer. The letterhead tray was empty.\u003C/p>&mdash; My dad calls me brucellosis. 
(@brucelowther) \u003Ca href=\"https://twitter.com/brucelowther/status/1366332712932569094?ref_src=twsrc%5Etfw\">March 1, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">During internship a long time ago. Everyone got paged, servers down!!! We rush to the office (yea, they hat the servers in the office) and found the cleaning lady needed a power plug while vacuuming the server room 😅 (true story)\u003C/p>&mdash; Bart 全部技術 (@ZenbuTech) \u003Ca href=\"https://twitter.com/ZenbuTech/status/1367826235951378434?ref_src=twsrc%5Etfw\">March 5, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">An employee who complained that the web app often wanted a fresh login during work. Searched and debugged nearly everything until we found out that this employee let other people work with their pc and cleaned the browser cache afterwards but wasn&#39;t aware that this reset the app.\u003C/p>&mdash; Bernhard Rausch (@rauschbit) \u003Ca href=\"https://twitter.com/rauschbit/status/1366499562295287813?ref_src=twsrc%5Etfw\">March 1, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nAs Brendan noted in [his story](https://boleary.dev/blog/2021-01-27-the-purse-caper-debugging-can-be-hard.html), \"The lesson is that as humans interact with systems – or as systems become complex enough to take actions on their own – they will make mistakes. And while you can't possibly anticipate every one of those mistakes from the onset, when you encounter one, you can work on making sure you have observability at every level so you can see it when it happens.\"\n\n## Debugging tip 2: Get the receipts\n\n[This comment](https://www.linkedin.com/feed/update/urn:li:ugcPost:6770791699489796096?commentUrn=urn%3Ali%3Acomment%3A%28ugcPost%3A6770791699489796096%2C6773896843345580033%29) perfectly demonstrates why it's critical to require details such as Screen IDs when users or customers submit bug reports.\n\n![LinkedIn comment: \"a customer sharing over and over again the same old screenshot claiming that the bug still exists ... That's why all screens have now a small screenID and Version number that is required when screen-shotting issues and bugs!\"](https://about.gitlab.com/images/blogimages/screenshot-2021-03-30-at-12.28.25.png \"LinkedIn comment\"){: .shadow}\n\n## Debugging tip 3: Computers do what you tell them to\n\nWe asked you for examples of your most unexpected culprit when debugging.\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"und\" dir=\"ltr\">Me 🤣\u003C/p>&mdash; Steven เด็กน้อย (@TweetsByBooth) \u003Ca href=\"https://twitter.com/TweetsByBooth/status/1367859314728255490?ref_src=twsrc%5Etfw\">March 5, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nWe appreciate the above commenter's self awareness, which brings us to our next lesson...\n\nThe code always does exactly what you tell it to – you just might be asking it to do something different from what you really meant. 
To get to the bottom of things, ask yourself what you expected the code to do versus what it actually did, and from there you'll usually find the answer staring you in the face.\n\n## Debugging tip 4: When in doubt, investigate the usual suspects\n\n[Occam's Razor](https://en.wikipedia.org/wiki/Occam%27s_razor) is your friend. It's often useful to rule out the obvious before you get too deep in debugging. Of course, no post about debugging would be complete without an off-by-one error, so we couldn't resist sneaking it into the title of this post (see what we did there? 😉)\n\nYour own codebase will no doubt have its usual suspects, as the interaction below demonstrates, so those are a good place to start.\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">DNS, it&#39;s always DNS!\u003C/p>&mdash; cronopio (@cronopio2) \u003Ca href=\"https://twitter.com/cronopio2/status/1367827879309025284?ref_src=twsrc%5Etfw\">March 5, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">If not, is definitely SELinux\u003C/p>&mdash; Łukasz Korbasiewicz (@korbasiewicz) \u003Ca href=\"https://twitter.com/korbasiewicz/status/1367861966446944258?ref_src=twsrc%5Etfw\">March 5, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nWhether bugs drive you to distraction or you enjoy the challenge (probably both?), we want to hear about yours! Share in the comments below or tweet us [@GitLab](https://twitter.com/gitlab/).\n\nThumbnail photo by [Andrew Wulf](https://unsplash.com/@andreuuuw?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/rubber-duck?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[268],{"slug":4187,"featured":6,"template":678},"3-debugging-tips-we-learned-from-you","content:en-us:blog:3-debugging-tips-we-learned-from-you.yml","3 Debugging Tips We Learned From You","en-us/blog/3-debugging-tips-we-learned-from-you.yml","en-us/blog/3-debugging-tips-we-learned-from-you",{"_path":4193,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4194,"content":4200,"config":4206,"_id":4208,"_type":16,"title":4209,"_source":17,"_file":4210,"_stem":4211,"_extension":20},"/en-us/blog/five-signs-you-should-think-bigger",{"title":4195,"description":4196,"ogTitle":4195,"ogDescription":4196,"noIndex":6,"ogImage":4197,"ogUrl":4198,"ogSiteName":692,"ogType":693,"canonicalUrls":4198,"schema":4199},"Five signs you should think BIGGER!","Are you a designer who is frustrated with only focusing on the next milestone? Do you feel like you have to answer too many questions in every Issue? Do you feel like your product is not making any progress? 
**Time to Think Bigger!**","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099620/Blog/Hero%20Images/Blog/Hero%20Images/insights_insights.png_1750099620265.png","https://about.gitlab.com/blog/five-signs-you-should-think-bigger","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Five signs you should think BIGGER!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Iain Camacho\"}],\n        \"datePublished\": \"2021-03-30\",\n      }",{"title":4195,"description":4196,"authors":4201,"heroImage":4197,"date":4203,"body":4204,"category":14,"tags":4205},[4202],"Iain Camacho","2021-03-30","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nAs a designer, it’s difficult to balance the scale of initiatives: Design too small, and nobody is excited or can understand the direction things are going. Start too big and everyone on the team may be too intimidated to start. ThinkBIG is a way of utilizing designers’ natural skillset to balance the iterative nature of engineering with the visionary nature of design.\n\nHere are 5 signals that you should switch up your style and Think Bigger:\n\n### 1) Every milestone is spent only prepping the next\n\n#### Signal\n\nWe’ve all been there. The next milestone planning issue is starting to get filled out and you, the designer, are realizing how many issues need design in order to be ready. As the priorities shift, you know the last two weeks of this milestone will be spent desperately trying to design mockups for engineers to start working on days later. I like to call this “Feeding the sharks”. It describes a certain level of panic some designers feel every milestone: If I don’t deliver enough, I might get chomped!\n\n#### Solution\n\nThinkBIG focuses on creating a larger-scale vision that can be iterated on as we go. This means that each design you put together leads to many independent issues engineers can work on. For a designer, this increases [results](https://handbook.gitlab.com/handbook/values/#results) by delivering one design worth many issues.\n\n### 2) Engineers are asking _a lot_ of questions\n\n#### Signal\n\nHave you ever started a new milestone and as engineers get started, they have a million questions detailing every possible state, permutation, and example that they should account for? This line of questioning means you, the designer, now need to make a myriad of new designs with only minute changes between them. This is not an [efficient](https://handbook.gitlab.com/handbook/values/#efficiency) use of the designer’s time.\n\n#### Solution\n\nFirst off, all these questions are valid and decisions that need to be made. By Thinking Bigger, engineers are better prepared to handle all the edge cases independently because they walk into their work with a fuller context of the impact on users.  This enables empathy-driven engineering, allowing engineers to lead the conversation around edge-cases with solutions in mind, instead of needing it to be defined ahead of time. By pushing the edge cases further down the product development lifecycle, there is also a unique opportunity for product, design, and engineering to [collaborate](https://handbook.gitlab.com/handbook/values/#collaboration) on delivering value to customers while still working iteratively.\n\n### 3) Nobody agrees on what the “MVC” actually is\n\n#### Signal\n\nPicture it: You’ve worked hard for weeks refining and distilling a big feature ask into a nicely designed MVC. 
It’s small, delivers value, and is beautiful to boot! You’ve convinced your PM to prioritize this beautiful little gem and it’s going onto the planning board. Everything feels amazing until… devastation!\n\nAfter engineering looked at it, they came back and said it was too large and would need to be broken down further. Now you’re at the end of your milestone, swiftly picking away at your beautiful design until it becomes a shallow imitation of its former glory.\n\n#### Solution\n\nHowever, there is a simple way to keep this from happening: “[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is a team sport”. The designer shouldn’t be the only person on the team compromising for the sake of MVC. With ThinkBIG, you have multiple chances to bring engineering into the fold early and with the full vision in mind. This means devs are part of the conversation from the start and able to craft a valuable iteration, and your designs become the conversation piece for deciding “What can we do next to deliver an amazing experience to our customers?”\n\n### 4) We’re working so hard but not getting anywhere\n\n#### Signal\n\nWorking iteratively is incredibly powerful, and at GitLab, we can see the value of an iterative approach. We’re able to change our priorities at a moment’s notice, and the work we actually have to deliver is reasonable and manageable while continuously delivering new value to customers. There is, however, a small drawback: When you’re only focusing on the step immediately in front of you, it’s easy to get lost along the way.\n\n#### Solution\n\nAs designers, we have a unique opportunity to be the navigators for our teams. Using the ThinkBIG model, designers are empowered to hold responsibility for the Vision. From here, the Product Manager/Product Designer relationship becomes a balance between the vision and the strategy. Designs based on the large vision are used to keep the team on track for hitting the targets that bring value to customers, while allowing for collaboration with the rest of the team on what tiny steps we take to get there.\n\n### 5) Engineers are reworking a lot\n\n#### Signal\n\nMy engineer and I are excited to work on a new effort. I’ve designed the first iteration and successfully passed it to them. While they’re building, I’m working on the design for the next iteration. A few weeks later the new changes are merged, the next iteration designs are ready, and customers are already seeing value. My engineer looks at the next iteration and painfully mutters, “Well, I’ll have to rewrite what I wrote last milestone to account for this.”\n\n#### Solution\n\nIn a highly iterative development lifecycle, it’s not uncommon to have to rework things as the product evolves. However, it shouldn’t be happening every time. With ThinkBIG, engineers are informed of the long-term goal as well as the short-term MVC iteration. This extra context allows them to deliver the iteration while architecting their code with an informed view of where it is headed.\n\n### Start Thinking BIGGER!\n\nAre some of these signals sounding familiar? Then switching your design style to ThinkBIG may be for you! The simplest way to make this change is to move the iteration breakdown to **after** the design phase. It immediately shows engineers where we want to go as a product or feature, opens the implementation breakdown (MVC) conversation to the whole team, and provides incredibly valuable insight to everyone on the team. 
This model of working helps designers be more efficient, deliver results, and foster tight collaboration with the broader team. To see this process in action, check out a [Package ThinkBIG session around the dependency proxy design and research](https://www.youtube.com/watch?v=LXFu6oDxhsw). To learn more, check out the GitLab Handbook entry on [ThinkBIG](https://about.gitlab.com/handbook/product/ux/thinkbig/).\n",[1347,959,915,2409,771],{"slug":4207,"featured":6,"template":678},"five-signs-you-should-think-bigger","content:en-us:blog:five-signs-you-should-think-bigger.yml","Five Signs You Should Think Bigger","en-us/blog/five-signs-you-should-think-bigger.yml","en-us/blog/five-signs-you-should-think-bigger",{"_path":4213,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4214,"content":4219,"config":4224,"_id":4226,"_type":16,"title":4227,"_source":17,"_file":4228,"_stem":4229,"_extension":20},"/en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud",{"title":4215,"description":4216,"ogTitle":4215,"ogDescription":4216,"noIndex":6,"ogImage":4140,"ogUrl":4217,"ogSiteName":692,"ogType":693,"canonicalUrls":4217,"schema":4218},"How to integrate GitLab.com with Jira Cloud","Check out how to use the GitLab App on the Atlassian Marketplace to connect your merge requests, branches, and commits to a Jira issue.","https://about.gitlab.com/blog/integrating-gitlab-com-with-atlassian-jira-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to integrate GitLab.com with Jira Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-03-25\",\n      }",{"title":4215,"description":4216,"authors":4220,"heroImage":4140,"date":4221,"body":4222,"category":14,"tags":4223},[3971],"2021-03-25","By moving to the cloud, engineering teams can accelerate innovation and scale resources across an organization. The ease of access and reduced infrastructure costs that come with moving to the cloud are a direct result of using a platform that easily integrates your data and keeps it secure yet accessible. GitLab.com, the cloud (SaaS) platform for GitLab, modernizes data platforms to leverage new applications and advances end-to-end software delivery. GitLab partners with other best-in-class cloud companies so your teams can use the tools that best align with your DevOps ecosystem. Application development requires speed and iteration, making seamless collaboration a necessity to deliver real business value. GitLab embraces connecting all phases of the software development lifecycle (SDLC) in a DevOps ecosystem that fuels visibility, collaboration, and velocity.\n\n## How to use GitLab with Atlassian's Jira\n\nWe know that many companies have been using Jira for project management, and have existing data and business processes built into their instance. For some of these customers, this means it can be difficult and cost-prohibitive to move off of Jira. We believe that people (and tools) work better when they're all in one place, so to serve these customers, we built a seamless integration between GitLab and Jira. 
By using the [GitLab for Jira app in the Atlassian Marketplace](https://marketplace.atlassian.com/apps/1221011/gitlab-com-for-jira-cloud), you can integrate GitLab.com and Jira Cloud harmoniously.\n\nHere's a short list of what you can do when integrating GitLab with Jira:\n\n* One GitLab project integrates with all the Jira projects in a single Jira instance.\n* Quickly navigate to Jira issues from GitLab.\n* Detect and link to Jira issues from GitLab commits and merge requests.\n* Log GitLab events in the associated Jira issue.\n* Automatically close (also called \"transition\") Jira issues with GitLab commits and merge requests.\n\n## How to configure the integration\n\nThere are two methods for configuring the integration: the [Jira DVCS connector](https://docs.gitlab.com/ee/integration/jira/dvcs/), and the method we describe in this blog post. The DVCS connector updates data only once per hour, while our method syncs data in real time. We recommend our method for this reason, but if you are not using both GitLab.com and Jira Cloud, use the Jira DVCS connector instead.\n\n- First, go to Jira Settings > Apps > Find new apps, then search for GitLab.\n- Next, click GitLab for Jira, then click \"Get it now\". Or, go to the [App in the marketplace](https://marketplace.atlassian.com/apps/1221011/gitlab-for-jira) directly.\n\n![Arrow pointing to \"get it now button\" on GitLab on Atlassian Marketplace App](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabonatlassianmarketplace.png){: .shadow.medium.center}\nClick the yellow button to download the app.\n{: .note.text-center}\n\n- Third, after installing, click \"Get started\" to go to the configuration page. This page is always available under Jira Settings > Apps > Manage apps.\n\n![GitLab on Atlassian Marketplace App](https://about.gitlab.com/images/blogimages/atlassianjira/manageappsjira.png){: .shadow.medium.center}\nClick the \"Get started\" button.\n{: .note.text-center}\n\n- Fourth, in Namespace, enter the group or personal namespace, and then click \"Link namespace to Jira\". The user that is setting up GitLab for Jira must have Maintainer access to the GitLab namespace. Note: The GitLab user only needs access when adding a new namespace. For syncing with Jira, we do not depend on the user’s token.\n\n![GitLab for Jira Configuration](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabforjiraintegration.png){: .shadow.medium.center}\nAdd a namespace.\n{: .note.text-center}\n\nAfter a namespace is added, all future commits, branches, and merge requests within all projects under that namespace will be synced to Jira. At the moment, past data cannot be synced.\n\nFor more information, see [the documentation](https://docs.gitlab.com/ee/integration/jira/index.html#usage).\n\n### How to troubleshoot GitLab for Jira\n\nThe GitLab for Jira App uses an iframe to add namespaces on the settings page. 
Some browsers block cross-site cookies, which can lead to a message saying that the user needs to log on to GitLab.com even though the user is already logged in: \"You need to sign in or sign up before continuing.\"\n\nIn this situation, we recommend using [Firefox](https://www.mozilla.org/en-US/firefox/), [Google Chrome](https://www.google.com/chrome/index.html), or enabling cross-site cookies in your browser.\n\n### What are the limitations of GitLab for Jira?\n\nThis integration is currently not supported on GitLab instances under a [relative URL](https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-a-relative-url-for-gitlab) (for example, http://yourcompanyname.com/gitlab).\n\n## How to use GitLab for Jira\n\nAfter integrating GitLab and Jira, you can:\n\n- Refer to any Jira issue by its ID in GitLab branch names, commit messages, and merge request titles.\n\n- Using commit messages in GitLab, you can move Jira issues along that Jira project's defined transitions.\n\n![GitLab for Jira Setup](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot.png){: .shadow.medium.center}\nIn this image, you can see that this Jira issue has four stages: Backlog, Selected for Development, In Progress, and Done.\n{: .note.text-center}\n\n- As referenced in the base GitLab-Jira integration, when you reference an issue in a comment on a merge request or commit, e.g., PROJECT-7, the basic integration adds a comment to the Jira issue. Also, commenting with a Jira transition (prefixed with a #) moves the Jira issue to the desired state. Below is an example using the built-in GitLab Web IDE (this can be done in your Web IDE of choice as well).\n\n![View of Jira Transitions](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot2.png){: .shadow.medium.center}\nThere are multiple Jira transition options.\n{: .note.text-center}\n\n- Now, the user can see linked branches, commits, and merge requests in Jira issues (merge requests are called \"pull requests\" in Jira issues).\nJira issue IDs must be formatted in UPPERCASE for the integration to work.\n\n![View branches, commits and merge requests in your jira issue](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot4.png){: .shadow.medium.center}\nView branches, commits, and merge requests in your Jira issue.\n{: .note.text-center}\n\n- Click the links to see your GitLab repository data.\n\n![Deep Dive into your GitLab commits](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot5.png){: .shadow.medium.center}\nTake a closer look at your GitLab commits.\n{: .note.text-center}\n\n![Deep Dive into your GitLab branches](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot6.png){: .shadow.medium.center}\nTake a deep dive into your GitLab merge requests.\n{: .note.text-center}\n\nFor more information on using Jira Smart Commits to track time against an issue, specify an issue transition, or add a custom comment, see the Atlassian page on [Smart Commits](https://support.atlassian.com/jira-cloud-administration/docs/enable-smart-commits/).
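\n\nTo make this concrete, here is a sketch of commit messages that use these features. The issue key `PROJECT-7` comes from the example above, the messages are made up, and the transition commands that are available depend on your Jira project's workflow:\n\n```bash\n# Reference the Jira issue (keys must be UPPERCASE) so the commit is linked to it\ngit commit -m \"PROJECT-7 Add input validation to the signup form\"\n\n# Smart Commit: add a comment to the issue and transition (close) it\ngit commit -m \"PROJECT-7 #comment Validation added #close\"\n```\n\n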
## Watch and learn\n\nMore of a video person? For a walkthrough of the integration, watch and learn how to configure the GitLab Jira integration using the Marketplace app.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/SwR-g1s1zTo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nGitLab helps teams ship software faster with technology integration options, such as the integration with Jira, that automate tasks and provide visibility into development progress and the greater end-to-end software lifecycle. We recognize that many companies use Jira for Agile project management, and our seamless integration brings Jira together with GitLab.\n\nCover image by [Mikołaj Idziak](https://unsplash.com/@mikidz) on [Unsplash](https://unsplash.com/photos/nwjRmbXbLgw).\n{: .note.text-left}\n",[873,894,937],{"slug":4225,"featured":6,"template":678},"integrating-gitlab-com-with-atlassian-jira-cloud","content:en-us:blog:integrating-gitlab-com-with-atlassian-jira-cloud.yml","Integrating Gitlab Com With Atlassian Jira Cloud","en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud.yml","en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud",{"_path":4231,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4232,"content":4238,"config":4243,"_id":4245,"_type":16,"title":4246,"_source":17,"_file":4247,"_stem":4248,"_extension":20},"/en-us/blog/iteration-and-code-review",{"title":4233,"description":4234,"ogTitle":4233,"ogDescription":4234,"noIndex":6,"ogImage":4235,"ogUrl":4236,"ogSiteName":692,"ogType":693,"canonicalUrls":4236,"schema":4237},"Why small merge requests are key to a great review","Massive merge requests lead to more problems than solutions. We explain how embracing iteration can lead to a better experience for the code author and code reviewer.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681966/Blog/Hero%20Images/broken_wood.jpg","https://about.gitlab.com/blog/iteration-and-code-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why small merge requests are key to a great review\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2021-03-18\",\n      }",{"title":4233,"description":4234,"authors":4239,"heroImage":4235,"date":4240,"body":4241,"category":14,"tags":4242},[1441],"2021-03-18","\n\nThis post is adapted from a [GitLab Unfiltered blog post](/blog/better-code-reviews/) written by me, [David O'Regan](/company/team/#oregand). In [part one of our series](/blog/tips-for-better-code-review/), we explain the importance of fairness and empathetic thinking in code reviews, and in [part two we explain why patch files bring added value to code reviews](/blog/patch-files-for-code-review/).\n{: .note .alert-info .text-center}\n\nThe [GitLab handbook defines iteration as doing the smallest thing possible to get it out as quickly as possible](https://handbook.gitlab.com/handbook/values/#iteration). If there were a single guiding principle I could suggest you lean into with your merge requests, it would be iteration. At its heart, software is all about iteration. Software is about taking a large problem and breaking it down into smaller, more manageable problems. Like any other skill, iteration needs to be learned and practiced often to improve. 
The next time you're hitting the \"Submit merge request\" button, pause a moment and think about whether the merge request you're about to submit could be downsized.\n\n## Why smaller MRs are better\n\nThe only thing worse than writing a long merge request is reviewing a long merge request. This is why at GitLab, iteration (and by extension, [small merge requests](https://handbook.gitlab.com/handbook/values/#make-small-merge-requests)) is one of our driving values.\n\nWe even created a [DangerBot](https://docs.gitlab.com/ee/development/dangerbot.html) that will ask code authors to break down merge requests that are over a certain size.\n\nMassive merge requests can create technical problems for a code reviewer beyond added complexity. If a review goes beyond a certain number of lines, it simply becomes too difficult to reason through without checking out the branch, booting the project, and [smoke testing](https://en.wikipedia.org/wiki/Smoke_testing_(software)). While smoke testing complex reviews is a great idea, this process shouldn't become a habit for reviewing code. Big MRs can lead to merge conflicts, content rot, and other disasters.\n\n[Sarah Yasonik](/company/team/#syasonik), backend engineer on Monitor at GitLab, suggested that reviewers handle too-large or too-complicated merge requests by creating new, smaller MRs while reviewing, and reviewing the code in chunks. It's better to break up a too-big MR than to continue adding lines of code to an MR that is already too large.\n\n### The art of the follow-up\n\nFor code authors and code reviewers alike, there are a few best practices to abide by. Namely, if you are a code author and you offer a follow-up review, be sure you always follow through on this promise.\n\nIf you are a code reviewer, here are four tips:\n\n*   Feel empowered to ask the code author for a follow-up\n*   Accept any offers of a follow-up graciously\n*   Be patient with code authors\n*   Know when it's best to reject a follow-up offer\n\n## Practical tips for using iteration in code reviews\n\n### Why does iteration matter?\n\nThe smaller the merge request, the easier it is for the code reviewer to check. The idea of shipping small changes is consistent with GitLab's [iteration value](https://handbook.gitlab.com/handbook/values/#iteration). Clement Ho, my frontend engineering manager who has since left GitLab, was a major champion for iteration. Once I started paying close attention to how Clement broke down merge requests into small bites, I started to notice the benefits of iteration almost immediately. Iteration is so important to GitLab that CEO [Sid Sijbrandij](/company/team/#sytses) hosts [weekly office hours devoted to breaking down big projects](/handbook/ceo/#iteration-office-hours), and grades our team members on their [iteration competency](https://handbook.gitlab.com/handbook/values/#iteration-competency).\n\n### How small merge requests help your reviewer\n\nIf iteration is all about releasing the [minimal viable change (MVC)](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) in small merge requests, then it follows that engineers who fully embrace iteration will be shipping less code per merge request, to the delight of their reviewer.
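\n\nIf a branch has already grown too big, you can still practice iteration after the fact by carving the work into smaller branches and opening a focused MR for each one. A sketch of how this could look on the command line (the branch names, path, and SHA are hypothetical):\n\n```bash\n# Start a smaller branch from the latest default branch\ngit checkout -b split-out-api-client origin/master\n\n# Pull over only the commits (or paths) that belong to this slice of the work\ngit cherry-pick \u003Ccommit-sha>\ngit checkout big-feature -- lib/api/\n\n# Push it up and open a small, focused merge request\ngit push -u origin split-out-api-client\n```\n\n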
We've all been there. You're assigned as a reviewer on an MR, and just as you're about to get comfortable, you open it to see more than 1,000 lines of code across multiple files. Time to refill your mug of coffee and get ready for a tiring review process.\n\nThe problems with large MRs should be obvious [if you've ever practiced self-reviews](/blog/tips-for-better-code-review/) or found yourself in this situation. Here are a few reasons why large MRs are indicative of bigger problems:\n\n*   Longer MRs have more lines of code\n*   There is a greater chance of brittle connections\n*   It becomes harder to follow the path of the solution/feature\n*   Screenshots usually cannot account for the volume of change\n*   It's much easier to miss bugs\n*   The author is sure to be left with lots of comments, which can be demoralizing\n\nIt's a simple concept, but one that is undervalued. Keep your merge requests small because:\n\n*   There are fewer lines of code to read\n*   Different contexts are separated into individual MRs\n*   The reviewer can follow along more easily\n*   It's easier to follow the path of a feature's development\n*   Fewer reviewer comments per MR are better for motivating the code author\n\nIn the end, we review code carefully at GitLab because we want to ensure that every release brings new value to our customers. If you have questions or comments about code reviews, creating smaller MRs, or iteration, leave us a comment on this blog post!\n\nGet more code review tips by reading the other blog posts in our series. In part one, we discuss [the role of fairness in code review](/blog/tips-for-better-code-review/), and in part two we share some [practical advice on using patch files](/blog/patch-files-for-code-review/).\n\n_Sara Kassabian contributed to this blog post._\n\nCover image by [Jon Sailer](https://unsplash.com/@eyefish73) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1084,915],{"slug":4244,"featured":6,"template":678},"iteration-and-code-review","content:en-us:blog:iteration-and-code-review.yml","Iteration And Code Review","en-us/blog/iteration-and-code-review.yml","en-us/blog/iteration-and-code-review",{"_path":4250,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4251,"content":4257,"config":4262,"_id":4264,"_type":16,"title":4265,"_source":17,"_file":4266,"_stem":4267,"_extension":20},"/en-us/blog/patch-files-for-code-review",{"title":4252,"description":4253,"ogTitle":4252,"ogDescription":4253,"noIndex":6,"ogImage":4254,"ogUrl":4255,"ogSiteName":692,"ogType":693,"canonicalUrls":4255,"schema":4256},"How patch files can transform how you review code","We explain how to use patch files for better code review.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672273/Blog/Hero%20Images/patch.jpg","https://about.gitlab.com/blog/patch-files-for-code-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How patch files can transform how you review code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2021-03-15\",\n      }",{"title":4252,"description":4253,"authors":4258,"heroImage":4254,"date":4259,"body":4260,"category":14,"tags":4261},[1441],"2021-03-15","\n\nThis post is adapted from a [GitLab Unfiltered blog post](/blog/better-code-reviews/) written by me, [David O'Regan](/company/team/#oregand). In [part one of our series](/blog/tips-for-better-code-review/), we explain the importance of fairness and empathetic thinking in code reviews.\n{: .note .alert-info .text-center}\n\n## Patch files\n\nWanna know a `git secret`? 
[Patch files](https://git-scm.com/docs/git-format-patch) are magic when it comes to code reviews. A [patch is a text file whose contents are similar to a Git diff](https://www.tutorialspoint.com/git/git_patch_operation.htm), but along with the code it contains metadata about the commits: a patch file includes the commit ID, date, commit message, and so on. We can create a patch from commits, and other people can apply it to their repository.\n\n## How to use a patch file\n\nA patch file is useful for code review because it allows the reviewer to create an actionable piece of code that shares their thoughts with the MR author. The code author can then apply the suggestion directly to their merge request. Patch files foster collaboration because they essentially create a pair programming session in the review process.\n\nThis lets other people check your changes in the Git patch file for any corrections that need to be made before the changes truly go live. After everything has been checked and corrections made, the changes can be pushed to the main branch of the repository. \n\nOne of the [better examples of a simple patch file in action](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/31686#note_341534370) comes from [Denys Mishunov](/company/team/#dmishunov), staff frontend engineer on the Create team.\n\n```diff\nIndex: app/assets/javascripts/projects/commits/components/author_select.vue\nIDEA additional info:\nSubsystem: com.intellij.openapi.diff.impl.patch.CharsetEP\n\u003C+>UTF-8\n===================================================================\n--- app/assets/javascripts/projects/commits/components/author_select.vue\t(revision 697d0734f1ae469a9a3522838e36b435d7cdf0be)\n+++ app/assets/javascripts/projects/commits/components/author_select.vue\t(date 1589356024033)\n@@ -110,6 +110,7 @@\n     \u003Cgl-new-dropdown\n       :text=\"dropdownText\"\n       :disabled=\"hasSearchParam\"\n+      toggle-class=\"gl-py-3\"\n       class=\"gl-dropdown w-100 mt-2 mt-sm-0\"\n     >\n       \u003Cgl-new-dropdown-header>\n\n```\n\nTo generate this suggestion, Denys pulled down the code he was reviewing and was able to offer a code solution based on his own testing. The patch file contains lots of valuable information, including the file affected, the date the revision was made, and the tool he used to generate the patch.\n\n## How to create a patch file\n\nYou can make a patch file using a web editor or with the command line. Read on to see how to create a patch file in GitLab both ways.\n\n### Patch files using a web editor\n\nIf you are rocking a nice fancy IDE or text editor, here's some good news: Most support patch files via plugins or out of the box. 
Here are some links to documentation on how to use patch files with different plugins: [VSCode](https://github.com/paragdiwan/vscode-git-patch), [Webstorm](https://www.jetbrains.com/help/webstorm/using-patches.html), [Atom](https://atom.io/packages/git-plus), and [Vim](https://vim.fandom.com/wiki/How_to_make_and_submit_a_patch).\n\n### Patch files using the command line\n\nOK command line users, you’ve made some commits. Here’s your `git log`:\n\n```bash\ngit log --pretty=oneline -3\n\n* da33d1k - (feature_branch) Reviewer Commit 1 (7 minutes ago)\n* 66a84ah - (feature_branch) Developer 1 Commit (12 minutes ago)\n* adsc8cd - (REL-0.5.0, origin/master, origin/HEAD, master) Release 13.0 (2 weeks ago)\n```\n\nThis command creates a new file, `reviewer_commit.patch`, with all changes from the reviewer's latest commit against the feature branch:\n\n```bash\ngit format-patch HEAD~1 --stdout > reviewer_commit.patch\n```\n\n### How to apply the patch\n\nFirst, take a look at what changes are in the patch. You can do this easily with `git apply`:\n\n```bash\ngit apply --stat reviewer_commit.patch\n```\n\nHeads up: Despite the name, this command won't actually apply the patch. It will just show statistics about what the patch will do.\n\nSo now that we've had a look, let's test the patch first, because not all patches are created equal:\n\n```bash\ngit apply --check reviewer_commit.patch\n```\n\nIf there are no errors, we can apply this patch without worrying.\n\nTo apply the patch, you should use `git am` instead of `git apply`. The reason: `git am` allows you to sign off an applied patch with the reviewer's stamp.\n\n```bash\ngit am --signoff \u003C reviewer_commit.patch\n\nApplying: Reviewer Commit 1\n```\n\nNow run `git log` and you can see the `Signed-off-by` tag in the commit message. This tag makes it very easy to understand how this commit ended up in the codebase.\n\n### The benefits of patch files for code reviews\n\nSo now that you know how to make a shiny patch file, why would you use patch files as part of a code review process? There are a few reasons you might consider offering a patch file for a change you feel strongly about:\n\n*   It communicates that you have invested a large amount of effort into understanding the author's solution and reasoning\n*   It demonstrates a passion for using teamwork to arrive at the best solution\n*   It shows the reviewer is willing to accept responsibility for this merge beyond just reading the code\n\nThere are a few alternatives to patch files for code reviews. GitLab has a [suggestion feature which allows the reviewer to suggest code changes using Markdown in a merge request](https://docs.gitlab.com/ee/user/discussions/#suggest-changes). The other option is to write raw code in Markdown right in the comment box. The downside is the reviewer doesn't have the option to test the code they are writing, making both of these options prone to error.\n\nIt is better to use a patch file because it involves the code reviewer in the review process in a collaborative way by default. In order to generate a patch, the reviewer must pull down the code, write the patch, test the change, and then submit it for the code author's consideration. Patch files increase the reviewer's visibility and offer a fully collaborative experience for the code author.
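\n\nPut together, the reviewer's side of the workflow is only a handful of commands. A sketch, where the branch name, commit message, and suggested change are all hypothetical:\n\n```bash\n# Grab the author's branch and test the change locally\ngit fetch origin feature_branch\ngit checkout feature_branch\n\n# Commit the suggested tweak and export it as a patch for the author\ngit commit -am \"Suggestion: tighten the dropdown padding\"\ngit format-patch HEAD~1 --stdout > reviewer_commit.patch\n```\n\n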
Some people might argue patch files are a cheeky way for a reviewer to force a change they would rather see make it into the codebase, but I believe that anyone who has taken the time to check out a branch, run the project, implement a change, and then submit that change back for discussion is fully embracing collaboration.\n\nGitLab is evaluating whether to make patch files [part of the code review and merge request workflow](https://gitlab.com/gitlab-org/gitlab/-/issues/220044).\n\nLearn more about [the role of fairness in code review in part one of our blog series](/blog/tips-for-better-code-review/). Up next we explain why shipping small merge requests is in line with our iteration value.\n",[1084,915],{"slug":4263,"featured":6,"template":678},"patch-files-for-code-review","content:en-us:blog:patch-files-for-code-review.yml","Patch Files For Code Review","en-us/blog/patch-files-for-code-review.yml","en-us/blog/patch-files-for-code-review",{"_path":4269,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4270,"content":4276,"config":4281,"_id":4283,"_type":16,"title":4284,"_source":17,"_file":4285,"_stem":4286,"_extension":20},"/en-us/blog/tips-for-better-code-review",{"title":4271,"description":4272,"ogTitle":4271,"ogDescription":4272,"noIndex":6,"ogImage":4273,"ogUrl":4274,"ogSiteName":692,"ogType":693,"canonicalUrls":4274,"schema":4275},"How to write a more thoughtful code review","The best code reviews are empathetic and fair. We explain best practices for providing feedback.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663502/Blog/Hero%20Images/paperclips.jpg","https://about.gitlab.com/blog/tips-for-better-code-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to write a more thoughtful code review\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2021-03-09\",\n      }",{"title":4271,"description":4272,"authors":4277,"heroImage":4273,"date":4278,"body":4279,"category":14,"tags":4280},[1441],"2021-03-09","\n\nThis post is adapted from a [GitLab Unfiltered blog post](/blog/better-code-reviews/) written by [David O'Regan](/company/team/#oregand).\n{: .note .alert-info .text-center}\n\nFeedback matters in our personal and professional lives, and software is no different. We deliver most if not all of our feedback to one another at GitLab using code reviews. We’re sharing some of our tools for you to add to your toolbelt when it comes to code reviews. In this post (the first of a three-part series) we share communication strategies for authors and reviewers.\n\n## Remember: Details matter for self-reviews\n\nAt GitLab, the [responsibility for the code lies with the merge request author](https://docs.gitlab.com/ee/development/code_review.html#the-responsibility-of-the-merge-request-author). We suggest code authors create a checklist to ensure that their i’s are dotted and their t’s are crossed before requesting a review. Here is an example MR checklist:\n\nBefore every feedback cycle:\n\n*   Re-read every line.\n*   Test your code locally.\n*   Write a test for every change (or as many as you can).\n*   Write a clear description and update it after each feedback cycle.\n*   Include at least one screenshot per change. 
More is better.\n*   Check and re-check your [labels](https://docs.gitlab.com/ee/user/project/labels.html). Then check them again.\n*   Consider using a ~\"workflow::refinement\" label for issues ahead of time as we do in the [Monitor:Health team](/handbook/engineering/development/ops/monitor/respond/). Read the documentation to [learn more about scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels).\n*   Review the code as if you were the reviewer. Be proactive, answer the likely questions, and open follow-up issues ahead of time.\n*   If you want to see the last and most important part in action, see how one of our frontend maintainers, [Natalia Tepluhina](/company/team/#ntepluhina), [pre-answered a question she knew would be asked in one of her merge requests](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/33587#note_353564612).\n\n## Communicate with good intentions\n\nOne of the hardest parts of getting a code review right is communicating the human touch. When we give and receive feedback, human habit can create cognitive distortion by defaulting to the most negative aspects of that feedback. At GitLab, we try to highlight the importance of assuming positive intent by incorporating it in our [value system](https://handbook.gitlab.com/handbook/values/#assume-positive-intent).\n\n### How the conventional comments system can help in code review\n\nTo give feedback more effectively, try the [conventional comments system](https://conventionalcomments.org/), which was developed by senior frontend engineer [Paul Slaughter](/company/team/#pslaughter). The conventional comments system calls for writing comments in a way that is useful for the reviewer and author of the merge request. It's so popular that one person made a browser extension (Chrome, Firefox) for it.\n\nConventional comments start with a single, eye-catching word that defines the intent and tone for the comment. This method gives the reader a chance to understand where your comment is coming from.\n\nLet's try an experiment. If you submitted code for review, which comment would you prefer to read?\n\nOption one: What do you think about X instead?\n\nOption two: **suggestion (non-blocking)**\nWhat do you think about X instead?\n\nYou likely chose option two because it provided context for the comment, communicated empathy, and was framed as an invitation to try a different approach, instead of being written as a command or mandatory change.\n\nThe magic part of this comment is the first line **suggestion (non-blocking)**. Straight away, before you even read the comment, you know the two most important things about it:\n\n*   It's a suggestion from the reviewer\n*   It's non-blocking, which means it's more of a friendly suggestion than a hard change that's necessary for the stability of the code.\n\nAnother advantage to this style of commenting is it allows merge request authors to understand the reviewer is not blocking their work. By highlighting what counts as a blocking and non-blocking comment, merge authors get the full context of what the reviewer is trying to communicate.\n\nFor example, suppose you have submitted a merge request for review and it comes back with eight comments.\n\nThe first option has no context in the comments. All comments are treated equally because they lack context for what counts as a blocker and what doesn't.\n\nOption two contextualizes comments using the conventional comments system. The comments can be treated by priority:\n\n*   Blockers: What needs to get the merge over the line.\n*   Non-blockers: What can be a separate merge or perhaps will spark a discussion.
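\n\nHere's a sketch of what a complete conventional comment could look like in practice. The label vocabulary comes from conventionalcomments.org; the scenario itself is made up:\n\n```\n**issue (blocking):** This query runs inside the loop, so we make one\nround-trip per item.\n\nCould we load the records in a single query before the loop? Happy to pair\non this if it helps.\n```\n\n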
Next time you're reviewing code, try using the conventional comments approach. Pay attention to how it affects the way the merge request author responds to the review, but also how you, the reviewer, feel leaving the review. We are considering integrating this feature directly into GitLab because we believe in making GitLab the best possible place for code reviews.\n\nIf you want to see a real-life example of some of Paul's work using conventional comments, check out [his reviews of my community contributions](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/24897) here at GitLab – you’ll see his empathy really shines through.\n\n## The role of \"fairness\" in code review\n\nIn many ways, code review is a form of negotiation, where the result of the negotiation is a selection of code that's valuable and held to a high standard. Central to being a good code reviewer (and good negotiator) is fairness. In fact, being a fair negotiator is often the most useful tool for code authors and code reviewers.\n\nFairness is actually mentioned twice in the [permission to play guidelines](https://handbook.gitlab.com/handbook/values/#permission-to-play) at GitLab:\n\n*   \"Be dependable, reliable, fair, and respectful.\"\n*   \"Seek out ways to be fair to everyone.\"\n\n### How to be a fair author\n\nIn many ways, being a fair author is the easier of the two roles. Here are a few simple Dos and Don'ts to remember:\n\n**Do:**\n\n*   Write a proper description with screenshots (can't stress this one enough!)\n*   Understand a reviewer’s point of view when they make suggestions\n*   Address any strange parts of your merge upfront (we all have them)\n*   Be [open to collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) on your work\n\n**Don't:**\n\n*   \"plz merge\"\n*   Be closed off or take offense to suggestions\n*   Forget to include any steps necessary for the build to run (or in other words, reduce the burden where possible)\n\nHonestly, it's pretty simple to be a fair author of a merge request. Even the smallest amount of empathy goes a long way, particularly when you remember that the person reviewing your code gets nothing extra for their efforts. They just want to help take your merge to the next level.\n\n### How to be a fair reviewer\n\nBeing fair as a reviewer is a bit more challenging because every individual has opinions or biases about how a piece of code should be written. Bias is something we all deal with when it comes to how we want things to be because we all have our own styles, preferences, and ideas about how software should be written.\n\nBias can create problems when it comes to code reviews because it's common for personal preferences to emerge when reviewing someone else's code. 
The typical reviewer might catch themselves thinking in absolutes, and the number of unresolved comments grows.\n\nHave you ever reviewed a merge request and found yourself thinking things like:\n\n*   \"It should be written like this\"\n*   \"Why would they do it like that?\"\n*   \"I would have done it this way\"\n*   \"That's not how that should be done!\"\n\nIf this sounds familiar, then you may have fallen victim to a common cognitive distortion: Should/must statements.\n\nIt is important for any reviewer to remember that just because a code author wrote code in a different style or manner from yours, it doesn't mean that the code is written incorrectly. If you catch yourself writing a review comment that includes the words \"should\" or \"must\", then you ought to take a step back and think about whether your suggestions are coming from a place of fairness or a place of bias. Ask yourself: Is the use of absolutes warranted here? Sometimes it will be both fair and warranted. One example is if your company follows a set of coding conventions like we do at GitLab. Stay vigilant for times when those statements are a thin veil for a personal preference.\n\nIf you do need to use a should/must statement, be sure to back up your assertions with documentation to help the code author understand why a change must be made.\n\nTypically, the fair response to something you don't agree with is to ask _why_ an author wrote code this way, instead of saying it must be another way.\n\nThis is part one of a three-part series on code review. Up next we will be explaining why patch files are a useful tool for reviewers.\n\nIf you have questions or comments about code reviews, creating smaller MRs, or iteration, leave us a comment on this blog post!\n\n_Sara Kassabian contributed to this blog post._\n\nCover image by [Jackson Simmer](https://unsplash.com/@simmerdownjpg) on [Unsplash](https://unsplash.com/photos/Vqg809B-SrE).\n{: .note}\n",[1084],{"slug":4282,"featured":6,"template":678},"tips-for-better-code-review","content:en-us:blog:tips-for-better-code-review.yml","Tips For Better Code Review","en-us/blog/tips-for-better-code-review.yml","en-us/blog/tips-for-better-code-review",{"_path":4288,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4289,"content":4294,"config":4301,"_id":4303,"_type":16,"title":4304,"_source":17,"_file":4305,"_stem":4306,"_extension":20},"/en-us/blog/starting-from-the-start-slippers-design-system",{"title":4290,"description":4291,"ogTitle":4290,"ogDescription":4291,"noIndex":6,"ogImage":4042,"ogUrl":4292,"ogSiteName":692,"ogType":693,"canonicalUrls":4292,"schema":4293},"Why design systems benefit everyone","Learn how the GitLab digital experience team built the Slippers design system for our marketing website.","https://about.gitlab.com/blog/starting-from-the-start-slippers-design-system","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why design systems benefit everyone\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stephen McGuinness\"}],\n        \"datePublished\": \"2021-03-05\",\n      }",{"title":4290,"description":4291,"authors":4295,"heroImage":4042,"date":4297,"body":4298,"category":14,"tags":4299},[4296],"Stephen McGuinness","2021-03-05","\n\nThe [Digital Experience team](/handbook/marketing/digital-experience/) is new at GitLab, but we spent the past few months [creating Slippers, a new design system, which is a centralized location for design assets and 
code](https://gitlab.com/gitlab-com/marketing/digital-experience/slippers-ui). This blog post explains how we built a design system in record time and how we overcame some of the challenges we encountered along the way.\n\nWe built Slippers because we needed a design system that we could rapidly iterate on and that would scale. We needed to use technologies that offered a single source of truth so our growing team could build on the repo. This process is not without its frustrations – what can work for one team might not work for the entire marketing department. In the past, discrepancies in design would happen because we didn't have a style guide.\n\nFortunately, creating a system that can respond to quick iterations can provide a seemingly simple solution to this complex problem. But \"simple\" in this case is misleading. We needed a new way of thinking and working. It is not enough to create a UI kit of consistent design assets for your designers to work with; doing this alone will fall at the first hurdle if it is not reflected in a coded repo. Designs will produce variations over time. Technical and design debt builds up due to small changes made over time, and you end up where you started – with fragmented design and code.\n\nTime and effort as well as a vision are necessary to create a design system solution. This is where our new team found itself near the end of 2020. In what was already a bizarre year for many, it was a great time to create a team to tackle this technical challenge head-on.\n\n## Why design systems are for everyone\n\nA common misconception of a design system is that it is only for designers. You create a UI kit, hand it to developers, and you are off to the races. While a UI kit is important to the success of a system, it is just one part of what is a technical and efficient product.\n\nOur goal was to create a reusable library of assets, which included design assets (typestack, colors, spacing, grid, buttons, etc.) along with documentation on usage criteria. This is a big project that requires a lot of effort. First, we aligned around a common vision and product architecture. I want to emphasize \"product\" because this system acts as a product serving multiple teams across GitLab. Next, we rallied our team around a common goal and got to work. Our team established a set of guiding principles that would always act as our anchor for the project. [You can read more about them here](https://gitlab.com/gitlab-com/marketing/digital-experience/slippers-ui).\n\n*\"The more decisions you put off, and the longer you delay them, the more expensive they become.\"*\n\n**―[Craig Villamor](https://www.linkedin.com/in/craigvillamor), senior design director of Google Maps**\n\nWe found this quote from Craig in a [Medium post about the benefits of design systems](https://medium.com/agileactors/7-quotes-about-design-systems-that-will-inspire-you-9a89557fb26f). His remarks describe the dangers of putting off building a design system for too long. The fact is, the longer you design without a clear system and rubric, the more tech and design debt accumulates.\n\n## How we built the design system\n\nProducts exist to solve problems, so we articulated our vision with working sessions. The sessions were a platform for aligning our vision based on what we considered maintainable design and technology.\n\nOnce we aligned on our guiding principles, we set about creating a roadmap. 
Our team decided how we wanted our product to be built, and agreed on tooling, tech stacks, and a cadence of delivery during our working sessions.\n\nWe decided on Figma for design since this was already being used within GitLab. Next, we created our core elements along with some [baseline components such as type, color, and spacing for the design system](https://www.figma.com/file/nWIOpmuMp7RZXmfTj6ujAF/Slippers_foundations?node-id=1292%3A573). We used existing pages as templates to refactor and give us a broader idea of what was and was not working. This process gave our developers time to investigate the best way to code our product and determine what shape it would take.\n\n## The value of a shared language\n\nOur engineering team started working on our tech stack and our designers started to work on what we called our \"foundations\". These can also be referred to as \"elements\". We did this so we could stress-test our foundations package by refactoring existing pages with new styles that gave us an idea of the direction of our design system.\n\nNext, we applied these core elements to a select sample of pages to act as a proof of concept. We chose to edit the [homepage](https://about.gitlab.com/), [enterprise page](/enterprise/), [pricing page](/pricing/), and [entire GitLab Blog section](/blog/). We identified pain points and applied stop-gaps along the way. Since we are [results-driven](https://handbook.gitlab.com/handbook/values/#results), we used local CSS (Cascading Style Sheets) tightly coupled to the site itself. The perk of this approach is that you can deliver results quickly. After doing some UX and UI refinements on these pages, introducing new technology was easier because each of the pages is actively maintained. We used this time to learn and apply this practice to improve the system.\n\n## What's next\n\nThough the Digital Experience team has only been established for four months, we've made huge inroads. We are starting to see how the Slippers design system will look once it is implemented across the entire organization.\n\nBuilding the Slippers design system is an example of a research and development (R&D) project. By laying out these foundations, we are set up for large-scale learning and success. The team is continuously gathering data for this R&D project and using it to better inform and refine our design system.\n\nAlso, since GitLab is open source, we are factoring open source values into our Slippers roadmap. We do this by posting video updates for our partners and [public YouTube videos](https://www.youtube.com/c/GitLabUnfiltered/featured).\n\nThe reality is, this work takes time and investment. There is a herculean effort still left for us to bring the system fully to life. But already we have demonstrated the value of a design system to our leadership by delivering more than 2000 new CMS pages.\n\nEven at this very early stage, the Slippers project has been rewarding and provides us with a continuous source of valuable insights. 
We're encouraged to push the boundaries and take calculated risks in what we learn and what we do.\n\nStay up to speed on our progress by checking out our [Slippers project](https://gitlab.com/gitlab-com/marketing/digital-experience/slippers-ui) and [watching our team videos on GitLab Unfiltered](https://www.youtube.com/c/GitLabUnfiltered/featured).\n\nCover photo by [Nihal Demirci](https://unsplash.com/@nihaldemirci?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/0ME-BIUBmUs)\n{: .note}\n",[1347,915,4300,1144],"UI",{"slug":4302,"featured":6,"template":678},"starting-from-the-start-slippers-design-system","content:en-us:blog:starting-from-the-start-slippers-design-system.yml","Starting From The Start Slippers Design System","en-us/blog/starting-from-the-start-slippers-design-system.yml","en-us/blog/starting-from-the-start-slippers-design-system",{"_path":4308,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4309,"content":4314,"config":4321,"_id":4323,"_type":16,"title":4324,"_source":17,"_file":4325,"_stem":4326,"_extension":20},"/en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"title":4310,"description":4311,"ogTitle":4310,"ogDescription":4311,"noIndex":6,"ogImage":3671,"ogUrl":4312,"ogSiteName":692,"ogType":693,"canonicalUrls":4312,"schema":4313},"GitOps & DevSecOps for production infrastructure in minutes","Unlock production-grade infrastructure and development workflows in under five minutes with Five Minute Production App: a blend of solutions offered by AWS, HashiCorp Terraform, and GitLab.","https://about.gitlab.com/blog/production-grade-infra-devsecops-with-five-minute-production","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sri Rangan\"}],\n        \"datePublished\": \"2021-02-24\",\n      }",{"title":4315,"description":4311,"authors":4316,"heroImage":3671,"date":4318,"body":4319,"category":14,"tags":4320},"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes",[4317],"Sri Rangan","2021-02-24","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-03-10.\n{: .note .alert-info .text-center}\n\nThis is a story about achieving production-grade infrastructure in under five minutes.\\\\\nThis is a story about achieving production-grade DevSecOps in under five minutes.\\\\\nThis is a story about achieving total convergence of GitOps in under five minutes.\n\nMy name is Sri, and over the last three months I worked closely with GitLab co-founder [DZ](/company/team/#dzaporozhets) in building \"Five Minute Production App.\"\n\nThe app blends solutions offered by AWS, HashiCorp Terraform, and GitLab, and offers production-grade infrastructure and development workflows in under five minutes.\n\n![Five Minute Production App Diagram](https://about.gitlab.com/images/blogimages/five-min-prod-01-complete-flow.png){: .shadow.medium.center}\n\nApart from the efficiencies gained from using Five Minute Production App, you benefit by achieving stateful, production-ready infrastructure on the AWS hypercloud.\n\nWe started with AWS first, as it is the hypercloud leader today. 
Support for Azure and Google Cloud is on the roadmap.\n\nOur vision and design decisions are explained in the [README](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#quickly).\n\n## Quickstart \n\nWe start with your GitLab project, which has the source code of your web application. Regardless of which language or framework you use, your web application is packaged as a container image and stored within your GitLab project's Container Registry.\nThis is the Build stage.\n\nThis is followed by the Provision stage, where Terraform scripts connect to AWS and create a secure environment for your web application.\nThe environments provisioned relate to your Git branching workflow.\nLong-lived Git branches create long-lived environments, and short-lived Git branches correspond to short-lived environments.\n\nResources provisioned include an Ubuntu VM, a scalable PostgreSQL database, a Redis cluster, and S3 object storage.\nWe consider these elements the building blocks for the majority of web applications, and many of them fall under the AWS free tier.\n\nThe infra state and credentials are stored within your GitLab project's managed Terraform state.\n\nFinally, we reach the Deploy stage, which:\n1. Retrieves the deployable image from the GitLab Container Registry\n1. Retrieves the infrastructure credentials from the GitLab Managed Terraform State, and\n1. Proceeds to deploy your web application\n\nEverything is achieved by including these two lines in your `.gitlab-ci.yml` file.\n\n```yaml\ninclude:\n  remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n```\n\nLet's look at the complete process in more detail.\n\n![Three stages of Five Minute Production App](https://about.gitlab.com/images/blogimages/five-min-prod-02-pipeline.png){: .shadow.medium.center}\nThe three stages of Five Minute Production App\n{: .note.text-center}\n\n## Build and package\n\nThe Build stage is where it all begins. Five Minute Production App reuses the [Auto Build stage](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-build) from the GitLab Auto DevOps pipeline.\n\nAuto Build builds and packages web applications that are:\n1. Containerized with a Dockerfile, or\n2. Compatible with the Cloud Native buildpack, or\n3. Compatible with the Heroku buildpack\n\nThus, web applications across a multitude of technologies are supported, including web frameworks such as Rails, Django, Express, Next.js, and Spring, and programming languages including Python, Java, Node.js, Ruby, and Clojure.\n\nOnce the Auto Build job has finished execution, the newly created container image is stored as an artifact in your GitLab project's Container Registry.\n\n## Provision the infrastructure\n\nThe next step, Provision, prepares infrastructure resources in AWS.\nThe first requirement here is the presence of AWS credentials stored as CI/CD variables at the project or group level.\nOnce valid AWS credentials are found, a Terraform script is executed to generate resources in AWS.
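\n\nAs a sketch of that setup (the variable names follow the standard AWS conventions read by Terraform; confirm the exact set against the template's README), you would define masked CI/CD variables under Settings > CI/CD > Variables and keep the pipeline definition itself minimal:\n\n```yaml\n# .gitlab-ci.yml – the included template does the rest\ninclude:\n  remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n\n# Assumed project-level CI/CD variables (masked, never committed):\n#   AWS_ACCESS_KEY_ID      – IAM access key used for provisioning\n#   AWS_SECRET_ACCESS_KEY  – the matching secret key\n#   AWS_DEFAULT_REGION     – e.g. us-east-1\n```\n\n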
These resources include:\n1. EC2 VM based on Ubuntu 20.04 LTS\n2. PostgreSQL database managed by AWS RDS\n3. Redis cluster managed by AWS ElastiCache\n4. S3 bucket for file storage\n5. Email Service credentials managed by AWS SES\n\nThe most critical resource is the PostgreSQL service, which has daily backups enabled.\nPostgreSQL data is snapshotted if the infrastructure resource is \"destroyed\" through a manual user action via the Five Minute Production App pipeline.\n\nThe EC2 VM is the only service accessible publicly. Ports 22, 80, and 443 are exposed.\nEvery other resource described above is part of a secure, private network, hidden from the public web, accessible only via the EC2 instance and your web application deployed there.\n\nThe stateful services and environments are tied to your Git branches.\\\\\nThis means every Git branch creates a new environment with these resource sets.\\\\\nWe don't have a preference about your Git branching and environment lifecycle.\\\\\nUse long-lived or short-lived branches as you see fit; just keep in mind that long-lived branches lead to long-lived environments and short-lived branches lead to short-lived environments.\n\n![Infrastructure resources provisioned on AWS](https://about.gitlab.com/images/blogimages/five-min-prod-03-infra-resources.png){: .shadow.medium.center}\nInfrastructure resources provisioned on AWS\n{: .note.text-center}\n\n## Deploy your web application\n\nFinally comes the Deploy stage.\n\nThis is where the deploy script retrieves your web application package (container image) from the GitLab Container Registry, then retrieves the EC2 instance\ncredentials from the GitLab Managed Terraform State, and proceeds to deploy the relevant version of your web application in its environment.\n\nModern web applications might require additional commands to be executed after each deployment or after the initial deployment,\nand these commands can be defined as variables in your `.gitlab-ci.yml` file.\n\nFinally, with the help of Certbot from Let's Encrypt, SSL certificates are generated and configured for your web application.\nIf you have defined the `CERT_DOMAIN` CI/CD variable, the SSL certificate will be generated for your custom domain name.\nOtherwise, the generated SSL certificate uses a dynamic URL that Five Minute Production App prepares for you.
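\n\nFor example, pointing the generated certificate at a custom domain is a single variable in `.gitlab-ci.yml` (a sketch; the domain is a placeholder):\n\n```yaml\nvariables:\n  CERT_DOMAIN: app.example.com  # the SSL certificate is issued for this domain\n```\n\n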
## Conclusion\n\nThere we have it. A simple yet production-ready setup for your web application. If you are looking for an AWS-based setup, this is ready to use.\n\nIf you are looking for something similar but not quite Five Minute Production App, this serves as an example of how to converge infrastructure-as-code with software development and provide seamless continuous deployment workflows.\n\nIn my personal experience, this is one of the most complete examples of GitOps:\n\n1. Your application source code lives in your GitLab project\n2. Your infrastructure defined as code lives in your GitLab project\n3. Your CI/CD pipeline lives in your GitLab project\n4. Your infrastructure state lives in your GitLab project\n5. Your infrastructure secrets and credentials live in your GitLab project\n6. Your environments configuration lives in your GitLab project\n\nThis complete GitOps convergence is not specifically configured for one project. It can be included as a template from multiple projects.\nThere is no reason why the GitLab project in your organization cannot be the single source of truth for everything.\n\n### Links\n\n- [Five Minute Production App](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md)\n- [Reference Examples](https://gitlab.com/gitlab-org/5-minute-production-app/examples)\n\n### About the author\n\n[Sri Rangan](mailto:sri@gitlab.com), an Enterprise Solutions Architect with GitLab, is a core contributor and maintainer\nof [Five Minute Production App](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md).",[832,937,894,873,535,1286],{"slug":4322,"featured":6,"template":678},"production-grade-infra-devsecops-with-five-minute-production","content:en-us:blog:production-grade-infra-devsecops-with-five-minute-production.yml","Production Grade Infra Devsecops With Five Minute Production","en-us/blog/production-grade-infra-devsecops-with-five-minute-production.yml","en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"_path":4328,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4329,"content":4335,"config":4340,"_id":4342,"_type":16,"title":4343,"_source":17,"_file":4344,"_stem":4345,"_extension":20},"/en-us/blog/pipeline-editor-overview",{"title":4330,"description":4331,"ogTitle":4330,"ogDescription":4331,"noIndex":6,"ogImage":4332,"ogUrl":4333,"ogSiteName":692,"ogType":693,"canonicalUrls":4333,"schema":4334},"Meet Pipeline Editor, your one-stop shop for building a CI/CD pipeline","The Pipeline Editor reduces the complexity of configuring your CI/CD pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665961/Blog/Hero%20Images/image_cover.jpg","https://about.gitlab.com/blog/pipeline-editor-overview","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet Pipeline Editor, your one-stop shop for building a CI/CD pipeline\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-02-22\",\n      }",{"title":4330,"description":4331,"authors":4336,"heroImage":4332,"date":4337,"body":4338,"category":14,"tags":4339},[1020],"2021-02-22","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-03-02.\n{: .note .alert-info .text-center}\n\nIn GitLab 13.8, we introduced the first iteration of the [Pipeline Editor](/releases/2021/01/22/gitlab-13-8-released/): a dedicated editor designed for authoring your CI/CD configuration. It is your one-stop shop for everything you need to configure your CI/CD pipelines.\n\n## Why do we need a dedicated editor for pipelines?\n\nGitLab's advanced syntax provides a high degree of customization for sophisticated and demanding CI/CD use cases. However, all of this power and flexibility comes with a fair bit of complexity. The Pipeline Editor helps you mitigate this challenge and serves as a single solution that groups all existing CI authoring features in a single location. It is our foundation, and we plan to build on it with enhancements in future iterations. \n\n## Getting started\n\nFor the Pipeline Editor to work, you'll first need to create a `.gitlab-ci.yml` file in your project. The `.gitlab-ci.yml` is a [YAML file](https://en.wikipedia.org/wiki/YAML) where you configure specific GitLab CI/CD instructions. Check out how we are working on [improving the first-time experience of creating a `.gitlab-ci.yml` file directly from the Pipeline Editor](https://gitlab.com/groups/gitlab-org/-/epics/5276).
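\n\nIf you're starting from scratch, a minimal `.gitlab-ci.yml` can be tiny. A sketch, with arbitrary stage and job names:\n\n```yaml\nstages:\n  - test\n\nhello:\n  stage: test\n  script:\n    - echo \"Hello from GitLab CI\"\n```\n\n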
### Continuous validation\nOnce you have created the `.gitlab-ci.yml` file and navigated to it in the Pipeline Editor, you can begin editing your configuration. Writing YAML can be error-prone. No matter how technical or skilled you are, programming mistakes happen. Sometimes an indentation will be missed, the incorrect syntax is used, or the wrong keyword is selected, and that's OK! As you start authoring your pipeline, GitLab will inspect the pipeline configuration using our linting APIs and provide you with an indicator of whether your pipeline configuration is valid. We will continuously validate your pipeline without making any changes to your pipeline configuration, so you can have confidence in hitting \"merge\" and running your pipeline without any surprises. \n\n![Continuous validation of pipelines](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image1.png){: .shadow.medium.center}\nContinuous validation of your pipelines\n{: .note.text-center}\n\n### Pipeline visualizer: Seeing is believing\nIt's practically impossible to envision what a pipeline should look like when you start writing from a blank YAML file. Luckily, GitLab provides you with a full pipeline view for every running pipeline. But, what if you want to visualize your pipeline _before_ it begins to run? Well, you can do that now by navigating to the \"Visualize\" tab in the Pipeline Editor. You'll find an illustration that shows how your pipeline will look as you write it. Similar to the linter, GitLab displays the visualization without committing, running, or altering your pipeline in any way.\n\nIn the visualization, we will group all your defined pipeline jobs by stages and add links between the jobs based on the [needs](https://docs.gitlab.com/ee/ci/yaml/#needs) relationships you've configured.\n\nIf we take a look at the example below, you can easily see that I've configured a three-stage pipeline, where the build stage has three jobs (steps 1-3), and that step 4 needs steps 1 and 3.\n\n![Pipeline editor overview](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image2.png){: .shadow.medium.center}\nPipeline visualizer\n{: .note.text-center}\n\nHere is what the YAML looks like:\n\n```yaml\nimage: alpine:latest\n\nstages:\n   - test\n   - build\n   - deploy\n\nprepare:\n   script: exit 0\n   stage: test\n\nstep1:\n   script: echo testo\n   stage: build\nstep2:\n   script: echo testo\n   stage: build\nstep3:\n   script: echo testo\n   stage: build\n\nstep4:\n   needs: ['step1', 'step3']\n   script: exit 0\n   stage: deploy\n```\n\n### View an expanded version of the CI/CD configuration\nWhen configuring pipelines, you often use keywords like `include` and `extends`. These keywords help break down one long pipeline configuration file into multiple files, which increases readability and reduces duplication. Unfortunately, those keywords can make a pipeline configuration hard to follow. In some configurations, a pipeline configuration file can be mostly composed of a list of other included configuration files.
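\n\nAs a minimal sketch of the problem (the job names are made up), a job built with `extends` reads like this in the source file, while its full configuration only becomes visible once everything is merged together:\n\n```yaml\n.base-test:               # hidden job, used only as a template\n  image: ruby:2.7\n  before_script:\n    - bundle install\n\nrspec:\n  extends: .base-test     # the merged view shows image and before_script inlined here\n  script:\n    - bundle exec rspec\n```\n\n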
To make the configuration easier to follow, we've added the ability to view a version of your pipeline configuration with all of the `include` and `extends` configurations merged together as a fourth tab in the Pipeline Editor. Now it's much easier to understand complex pipeline flows, which simplifies the debugging process.\n\nPipeline configuration example:\n\n![pipeline configuration](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image6.png){: .shadow.medium.center}\n\nThe expanded version of the pipeline configuration:\n\n![expanded pipeline configuration](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image7.png){: .shadow.medium.center}\n\n### Lint\n\nThe CI lint helps you validate your pipeline configuration and provides you with additional information about it. That's why we've copied the existing CI linter (which was well hidden on our jobs page) to the Pipeline Editor as a third tab.\n\nThe linter provides you with detailed information about every job you've configured in your pipeline. For each job, it provides the [before_script](https://docs.gitlab.com/ee/ci/yaml/#before_script), [after_script](https://docs.gitlab.com/ee/ci/yaml/#after_script), and [script](https://docs.gitlab.com/ee/ci/yaml/#script) fields, tags, environment names, branches it should run on, and more.\n\nIf you look at the following example, just by looking at the linter tab you'll know that the `prepare` job:\n* Runs in the `prepare` stage\n* Contains `before_script`, `script`, and `after_script` fields \n* Runs only on master \n* Runs upon failure\n* Is tagged as production\n* Has the environment set to production \n\n![image3](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image3.png){: .shadow.medium.center}\n\nIn this second example, you can see that the build job is a manual job that runs on all branches and is allowed to fail:\n\n![Manual build job](https://about.gitlab.com/images/blogimages/2020-02-08-Pipeline-editor-overview/image5.png){: .shadow.medium.center}\n\n## How the Pipeline Editor came about\n\nEarlier this year, we decided to split continuous integration into two separate teams: [Continuous Integration](/direction/verify/continuous_integration/), which is responsible for improving the experience of running a CI/CD pipeline, and [Pipeline Authoring](/direction/verify/pipeline_composition/), responsible for helping you author your pipeline. We've defined the Pipeline Authoring team goal as, \"Making the authoring experience as easy as possible for both advanced and novice users.\"\n\n![Verify Groups](https://about.gitlab.com/images/handbook/engineering/verify/verify_groups_banner.jpg){: .shadow.center}\n\nAs a team, we realized that a dedicated authoring area is needed to achieve our [ambitious roadmap](https://youtu.be/hInM7JUEH4Y) – this is when the Pipeline Editor idea was formed. \n\n## Try out Pipeline Editor yourself\n\nThat's it! I hope you found this overview useful. To get started with GitLab CI, you can [try out our hosted GitLab.com solution](/free-trial/), or you can [download GitLab Self-Managed](/free-trial/) and read its documentation for more in-depth coverage of the functionality. \n\nIf you are using our Pipeline Editor, we would love it if you leave us a note on our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/298928)! 
If you'd like to learn more about the upcoming features, feel free to read through the [Pipeline Editor second iteration epic](https://gitlab.com/groups/gitlab-org/-/epics/4814), and tag `@dhershkovitch` if you have any questions.\n",[832,937,894,749],{"slug":4341,"featured":6,"template":678},"pipeline-editor-overview","content:en-us:blog:pipeline-editor-overview.yml","Pipeline Editor Overview","en-us/blog/pipeline-editor-overview.yml","en-us/blog/pipeline-editor-overview",{"_path":4347,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4348,"content":4354,"config":4360,"_id":4362,"_type":16,"title":4363,"_source":17,"_file":4364,"_stem":4365,"_extension":20},"/en-us/blog/improve-your-gitlab-productivity-with-these-10-tips",{"title":4349,"description":4350,"ogTitle":4349,"ogDescription":4350,"noIndex":6,"ogImage":4351,"ogUrl":4352,"ogSiteName":692,"ogType":693,"canonicalUrls":4352,"schema":4353},"10 tips to make you a productive GitLab user","Learn how quick actions can make you a more efficient GitLab user.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666717/Blog/Hero%20Images/cover-image.jpg","https://about.gitlab.com/blog/improve-your-gitlab-productivity-with-these-10-tips","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 tips to make you a productive GitLab user\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"},{\"@type\":\"Person\",\"name\":\"Roman Kuba\"}],\n        \"datePublished\": \"2021-02-18\",\n      }",{"title":4349,"description":4350,"authors":4355,"heroImage":4351,"date":4357,"body":4358,"category":14,"tags":4359},[1504,4356],"Roman Kuba","2021-02-18","\nMost people know GitLab is a solid tool in today's DevOps workflows, with code reviews, CI/CD, and project management all available for users in a single application. But there are always ways to be more efficient. Since we use GitLab to develop GitLab, everyone has their own habits and hidden gems to speed things up.\n\nWe chatted about GitLab efficiency tips after seeing new [quick actions releases in GitLab 13.8](/releases/2021/01/22/gitlab-13-8-released/#display-all-available-quick-actions-in-autocomplete), and decided to share some of our favorite tips with GitLab users. We share our typical day-to-day workflows as an engineering manager (Roman) and a developer (Michael) to show how quick actions make teams more productive and efficient.\n\n### Roman: Engineering manager starts planning\n\nI am an engineering manager on the [Create: Editor team](/handbook/product/categories/features/#createeditor-group) at GitLab. One of my responsibilities is capacity planning with product managers. Planning happens every month for the next [GitLab release](/releases/). GitLab uses the [milestone feature](https://docs.gitlab.com/ee/user/project/milestones/) to keep everything organized for the release. As planning goes on, I need to create a new issue for a new feature in the Web IDE. The issue description uses a [description template](https://docs.gitlab.com/ee/user/project/description_templates.html) which gets filled with the right context.\n\nBut instead of searching for the assignee in the dropdown, I just add a new line:\n\n```\n/assign @dnsmichi\n```\n\nAll quick actions start with a `/` character and will be interpreted by GitLab when the issue gets created. 
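Multiple quick actions can be stacked in a single description or comment, one per line. A quick sketch (the username and values are just placeholders):\n\n```\n/assign @dnsmichi\n/due in 2 days\n/subscribe\n```\n\n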
In addition to an assignee, issue labels need to be applied as well.\n\n```\n/label ~\"type::feature\"\n```\n\nYou can also assign multiple labels at once:\n\n```\n/label ~devops::create ~group::editor ~\"Category::Web IDE\"\n```\n\n![GitLab Quick Actions: Multiple labels](https://about.gitlab.com/images/blogimages/improve-your-gitlab-productivity-10-tips/quick_action_label_multiple.png)\nHow to apply multiple labels using GitLab quick actions.\n\nThe issue needs to be assigned to the next milestone. This can be done with another quick action:\n\n```\n/milestone %13.10\n```\n\nNote that 13.9 release planning already happened last month. The [product kickoff](/direction/kickoff/) highlights the planned features.\n\nThe keyboard shortcut `cmd + enter` now creates the issue without clicking a button.\n\nSo far, we've been able to complete a lot of the necessary workflows around issues in one go, and without ever leaving the text box.\n\nAfter reviewing the issue I created, I remembered that this issue should be assigned to the `FY22Q1 Performance OKRs` epic. Again, we can use a quick action. It’s important to note here that referencing an epic works with the `&` character. When we type this character, we can start to search for the epic by typing its name.\n\n```\n/epic & \u003Csearch>\n```\n\nThis will turn into something like this:\n\n```\n/epic &123\n```\n\nAll quick actions can also be used in a new comment, again using `cmd + enter` to save it.\n\nThe `FY22Q1 Performance OKRs` epic still needs to be added to a parent engineering OKR epic. So I'll navigate to the now-linked epic and use another quick action to set the parent epic.\n\n```\n/parent_epic & \u003Csearch>\n```\n\nWhen working with multiple levels of epics, remember to keep practicing quick actions to create visual epic trees quickly. That’s all for now from my manager's side.\n\n### Michael: A developer starts with code\n\nI work on the [Developer Evangelism team](/handbook/marketing/developer-relations/developer-evangelism/) at GitLab, and although I'm not technically a developer in the typical sense, I still work with code on a daily basis. The average day starts with a new to-do. Today's to-do points me to the new issue that Roman created. After reviewing the issue requirements and defining the changes to be implemented, I start work: I'll clean up the work environment, pull the latest changes from the default branch (main/master), and create a new Git branch in my local terminal.\n\nAfter a few commits, my workday nears its end. I decide to publish the local Git branch and create a new Merge Request (MR). After creating the MR, the triage workflow kicks off. I mark the [MR as draft](https://docs.gitlab.com/ee/user/project/merge_requests/drafts.html) to prevent the workflow from starting before the MR is ready:\n\n```\n/draft\n```\n\nThe next day, I continue working on the MR and finish everything that was planned, so I need to remove the draft designation. The `draft` quick action is a toggle, so I can use it to add and remove the `Draft` marker.\n\n```\n/draft\n```\n\nThe next step is to assign a reviewer for the MR. GitLab 13.7 added [merge request reviewers](/blog/merge-request-reviewers/), which means we can leave the MR assignee untouched. 
I'll use the live search to assign the right reviewer with a leading `@` character.\n\n```\n/assign_reviewer @ \u003Csearch>\n```\n\n![GitLab Quick Actions: Remove draft and assign reviewer](https://about.gitlab.com/images/blogimages/improve-your-gitlab-productivity-10-tips/quick_action_toggle_draft_assign_reviewer.png)\nHow to remove the draft and add a reviewer using GitLab quick actions.\n\nAfter the first round of review, I get feedback and items for follow-up. Since I am in the middle of a different task, I create a new to-do to remind myself of an open task to follow up on when I'm ready.\n\n```\n/todo\n```\n\nSince my work as a developer evangelist includes many topics and areas, I get distracted with other high-priority tasks throughout the day. Later in the week, I'll come back to the MR. The review items have been addressed with team member suggestions, and all threads are now resolved. The reviewer approves the MR with the quick action:\n\n```\n/approve\n```\n\nThe review process took a little while to complete, and because GitLab is a fast-changing project, the Git branch is outdated. I need to rebase against the default branch.\n\nBut since I am already working on something else, I do not want to stop what I am doing currently to rebase. Then I remember: GitLab 13.8 added the `/rebase` quick action. This schedules a new background job that attempts to rebase the branch, and stops operations if it fails.\n\nI open the MR and create a new comment. I start typing the rebase quick action, followed by `cmd+enter` to send it:\n\n```\n/rebase\n```\n\n![GitLab Quick Actions: Rebase](https://about.gitlab.com/images/blogimages/improve-your-gitlab-productivity-10-tips/quick_action_rebase.png){: .shadow.center}\nHow to rebase with GitLab quick actions.\n{: .note.text-center}\n\nPhew. It worked. The CI/CD pipeline is running, and I believe that the rebase did not break anything. I go to click the \"Merge when pipeline succeeds\" button, and remember there's a quick action for that.\n\n```\n/merge\n```\n\nThe quick action takes into account what is configured for the project: Either merge when the pipeline succeeds or add it to the [Merge Train](/blog/merge-trains-explained/).\n\nEverything happens automatically and I can continue working on other tasks. The manager (in this case, Roman) sees the issue being closed automatically using the `Closes` keyword. That's all from my developer's side.\n\nTip: [Automatically closing issues](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) after the MR has been merged is an amazing workflow for everyone, assuming the manager has set the milestone accordingly.\n\nAt GitLab, we have documented our [engineering workflows](/handbook/engineering/workflow/) which can be followed more efficiently with the quick actions shown in this blog post.\n\n### Quick actions + description templates = ❤️\n\nWe demonstrated different ways quick actions can be used to complete common tasks more efficiently. But they do not always have to be applied manually. One shortcut is to just add them to [description templates](https://docs.gitlab.com/ee/user/project/description_templates.html) so you do not have to worry about remembering them all. This way, you can also automatically assign users, add labels, and much more based on the template you apply. Using description templates helps with project contributions and allows everyone to focus on the feature proposal or bug report.\n\nLet’s try it! 
Create a new project, navigate into \"Issues > Labels\", and generate a default set of labels. Next, open the Web IDE and add a new file in `.gitlab/issue_templates/bug.md`. Add the following content:\n\n```\n# Summary\n\n# Steps to reproduce\n\n1.\n1.\n1.\n\n\u003C!-- Do not edit the section below -->\n/label ~\"type::bug\"\n/assign @YOURUSER\n```\n\nFirst, replace YOURUSER with your username (make sure you're logged in). Commit the new file to the default branch, and navigate into the issue list. Next, create a new issue and select `bug` from the dropdown. Add some content, and submit the issue. Finally, verify that the label and assignee are both set.\n\nTip: This is not limited to issue templates; it also works with MRs and epics. At GitLab, we also often use this function to dynamically assign people based on automatically created reports. There are many opportunities to use description templates.\n\n### More tips and insights\n\nWe have not yet tried the following quick actions - can you help us out? :-)\n\n```\n/shrug\n/tableflip\n```\n\nThere are more [quick actions](https://docs.gitlab.com/ee/user/project/quick_actions.html) and [keyboard shortcuts](https://docs.gitlab.com/ee/user/shortcuts.html) available. In fact, GitLab user [Gary Bell](https://gitlab.com/garybell) shared great insights on quick actions in his \"Tanuki Tuesday\" blog series:\n\n- [Quick Actions](https://www.garybell.co.uk/quick-actions-in-gitlab/)\n- [Keyboard Shortcuts](https://www.garybell.co.uk/using-keyboard-shortcuts-in-gitlab/)\n\nLet us know in the comments below which quick actions most helped your productivity and if you have other creative ways of using quick actions.\n\nPS: We also support shortcuts at GitLab, and the most loved shortcut is `cmd + k` for inserting a Markdown URL.\n\nCover image by [Juan Gomez](https://unsplash.com/@nosoylasonia) on [Unsplash](https://unsplash.com/photos/kt-wA0GDFq8)\n{: .note}\n",[727,1347,915],{"slug":4361,"featured":6,"template":678},"improve-your-gitlab-productivity-with-these-10-tips","content:en-us:blog:improve-your-gitlab-productivity-with-these-10-tips.yml","Improve Your Gitlab Productivity With These 10 Tips","en-us/blog/improve-your-gitlab-productivity-with-these-10-tips.yml","en-us/blog/improve-your-gitlab-productivity-with-these-10-tips",{"_path":4367,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4368,"content":4374,"config":4379,"_id":4381,"_type":16,"title":4382,"_source":17,"_file":4383,"_stem":4384,"_extension":20},"/en-us/blog/engineering-teams-collaborating-remotely",{"title":4369,"description":4370,"ogTitle":4369,"ogDescription":4370,"noIndex":6,"ogImage":4371,"ogUrl":4372,"ogSiteName":692,"ogType":693,"canonicalUrls":4372,"schema":4373},"How to carry out remote work team collaboration","Some tips for successful asynchronous collaboration from all-remote engineering teams.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681893/Blog/Hero%20Images/remoteengineering.jpg","https://about.gitlab.com/blog/engineering-teams-collaborating-remotely","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to carry out remote work team collaboration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-02-09\",\n      }",{"title":4369,"description":4370,"authors":4375,"heroImage":4371,"date":4376,"body":4377,"category":14,"tags":4378},[3676],"2021-02-09","\n\n_This post is the third in our ongoing 
series about remote work and engineering. Check out the previous posts, [Tips for engineering managers learning to lead remotely](/blog/tips-for-managing-engineering-teams-remotely/), and [Tips for remote pair programming](/blog/remote-pair-programming-tips/)._\n\nAlmost a year into the pandemic, it’s still unclear when it will be safe to head back into the office. While many companies have resolved the initial growing pains of transitioning from a colocated to an all-remote workplace, we want to help your teams go from surviving to thriving by sharing some strategies to [improve remote work collaboration](/company/culture/all-remote/collaboration-and-whiteboarding/).\n\n## Remote working and asynchronous communication\n\nTraditional methods of communication and project management might work in a colocated office setting, but don’t necessarily translate to a remote environment. In a video on GitLab Unfiltered (our company YouTube channel where team members share their work with the public), [Austin Regnery](/company/team/#aregnery), product designer on Manage: Compliance at GitLab, and [Nick Post](/company/team/#npost), senior product designer on Manage: Optimize at GitLab, talk about the growing pains of transitioning from working synchronously to asynchronously.\n\n\"Emails and meetings... [it's all] email, email, email, meeting, PowerPoint, that’s the modus operandi for how companies collaborated,\" says Nick. \"And it’s something that companies and teams have really held on to.\"\n\nAsynchronous communication challenges the traditional modes of workplace communication, but at GitLab, we’ve discovered that this more modern method of collaboration is more efficient and effective in delivering business value to our customers and helping our team members achieve a work-life balance.\n\nWatch the video below to learn how these designers work asynchronously.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/stLBy9TWJBw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Remote work collaboration: be sensitive to timezones\n\nWhen your team is working in the same office, the workday begins and ends at roughly the same time for everyone. At GitLab, we work asynchronously and our team is globally distributed, which means work is completed on a 24-hour clock instead of just eight hours. \n\nOne of the biggest challenges with working asynchronously is ensuring that all team members feel a sense of belonging across timezones. \n\n\"Timezones matter,\" says [Nuritzi Sanchez](/company/team/#nuritzi), senior open source program manager at GitLab. \"Make sure you're not leaving out team members in one locale. Have meetings at different times if needed (e.g., once a week in a NORAM-friendly time, next week in APAC-friendly time, etc).\" \n\nMaking the switch from synchronous to asynchronous work isn’t easy but really pays off. Learn more about [how we work asynchronously at GitLab](/company/culture/all-remote/asynchronous/#async-at-gitlab).\n\n### Remote work collaboration: maximize synchronous time \n\nThere are times when synchronous communication is the best option, such as during weekly team meetings and one-on-ones. Nuritzi explains that there are a few teams at GitLab that have adopted a particular structure to make the most out of every team meeting. 
\n\n* Always have an agenda: Every meeting needs an agenda, usually in Google Docs, that allows team members to add discussion items and notes before and during the meeting.\n* Notes are key: \"In OSS communities, meeting notes or IRC chats are something that are usually posted publicly later so everyone who needs to can catch up,\" says Nuritzi.\n* Start with check-ins: Team members voluntarily share whether they're at a green/yellow/red level in their work life and personal life. Managers are always advised to participate in these check-ins to set a collaborative tone.\n* FYIs: Not everything on the agenda will merit discussion. Add FYIs for items that should be shared with the team but don't require extensive dialogue.\n* Discussion topics: Some agenda items will need to be discussed with the team; add these to the discussion items section of the agenda.\n* Meetings are optional: Not every team member will be able to attend every meeting, and that's OK. Team members who aren't present can still participate by adding discussion items, FYIs, and notes to the agenda that can be shared by their fellow team members.\n* Try to make it fun: We start and end every [Inbound Marketing weekly recap meeting](https://youtube.com/playlist?list=PL05JrBw4t0KppgWkSa3YgDgc_qUTKsBCs) with music to liven up Thursdays for our globally distributed team.\n\n## How to make remote onboarding feel welcoming\n\nOnboarding is just one example of a workplace process that has been impacted by the pandemic. This makes having empathy for the new hire even more important than usual. Empathetic onboarding means framing the process from the perspective of the new hire, says [Alexandra Sunderland](https://ca.linkedin.com/in/alexandrasunderland), engineering manager at Fellow.\n\nAlexandra iterated on her onboarding process over a number of years and has since developed a six-step framework that she presented at our virtual user conference, GitLab Commit, last year. The six steps are: (1) Focus on relationships, (2) write knowledge down, (3) create an FAQ, (4) set goals and milestones, (5) set up their physical space, (6) ask for feedback.\n\nWatch the video below to learn how Fellow onboards engineers remotely.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tdWxlpN8dUk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nBeyond making sure new team members are set up with a functional workspace, the secret to successful onboarding is assuming that the team member has the skills and knowledge necessary to do their job, but still needs to learn the context in which to do it, according to Alexandra.\n\nGitLab has a [unique onboarding process](/handbook/people-group/general-onboarding/), in that new hires are given a detailed checklist in an issue and are assigned an onboarding buddy. This onboarding process is a crash course in working as a [manager of one](https://handbook.gitlab.com/handbook/values/#managers-of-one) in an asynchronous workplace.\n\n## How to collaborate on releases remotely\n\nMattermost's [Aaron Rothschild](https://www.linkedin.com/in/arothschild), senior product manager, and [Paul Rothrock](https://www.linkedin.com/in/icelander), customer engineer, describe how their dashboard that provides visibility into the DevOps process can be used with tools such as Jira, Jenkins, and GitLab to release software remotely. 
Watch the presentation from our user conference to see an example of how the team collaborates to deliver a release.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/QBG0-YaDXu0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to stay social when you’re working alone\n\nCommunication technologies, specifically Slack, can help cultivate a sense of community and belonging within our teams. We publicly recognize team members who contributed to success in a #team-member-updates channel on Slack, where we announce bonuses and promotions for team members across the company. We also show appreciation for our collaborators in the #thanks channel.\n\n### The (virtual) water cooler\n\nThe pandemic has us all practicing social distancing, but at GitLab, we worked hard to replicate the social aspect of the office through a [few different informal communication programs](/company/culture/all-remote/informal-communication/), such as our coffee chats, and Slack channels devoted to different extracurriculars (I’m fond of the #dog and #baking channels, personally). The [Donut bot on Slack is a neat social feature](/company/culture/all-remote/informal-communication/#the-donut-bot). The bot randomly introduces you to a team member that you may not otherwise have collaborated with in your daily work, and invites the two of you to [set up a coffee chat](/company/culture/all-remote/informal-communication/#coffee-chats).\n\n## Up-level your remote work skills\n\n[GitLab launched a free program on Coursera](https://www.coursera.org/learn/remote-team-management) to help managers with the transition of managing a team remotely. 
The course is free to join and is packed with valuable information to help companies adapt to working remotely.\n\nCover image by [Chris Montgomery](https://unsplash.com/@cwmonty?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/remote-work?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[2409,1347],{"slug":4380,"featured":6,"template":678},"engineering-teams-collaborating-remotely","content:en-us:blog:engineering-teams-collaborating-remotely.yml","Engineering Teams Collaborating Remotely","en-us/blog/engineering-teams-collaborating-remotely.yml","en-us/blog/engineering-teams-collaborating-remotely",{"_path":4386,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4387,"content":4393,"config":4400,"_id":4402,"_type":16,"title":4403,"_source":17,"_file":4404,"_stem":4405,"_extension":20},"/en-us/blog/ci-deployment-and-environments",{"title":4388,"description":4389,"ogTitle":4388,"ogDescription":4389,"noIndex":6,"ogImage":4390,"ogUrl":4391,"ogSiteName":692,"ogType":693,"canonicalUrls":4391,"schema":4392},"How to use GitLab CI to deploy to multiple environments","We walk you through different scenarios to demonstrate the versatility and power of GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662033/Blog/Hero%20Images/intro.jpg","https://about.gitlab.com/blog/ci-deployment-and-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab CI to deploy to multiple environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Nemytchenko\"},{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-02-05\",\n      }",{"title":4388,"description":4389,"authors":4394,"heroImage":4390,"date":4396,"body":4397,"category":14,"tags":4398,"updatedDate":4399},[4395,1101],"Ivan Nemytchenko","2021-02-05","This post is a success story of one imaginary news portal, and you're the happy\nowner, the editor, and the only developer. Luckily, you already host your project\ncode on GitLab.com and know that you can\n[run tests with GitLab CI/CD](https://docs.gitlab.com/ee/ci/testing/).\nNow you’re curious if it can be [used for deployment](/blog/how-to-keep-up-with-ci-cd-best-practices/), and how far you can go with it.\n\nTo keep our story technology stack-agnostic, let's assume that the app is just a\nset of HTML files. No server-side code, no fancy JS assets compilation.\n\nThe destination platform is also simplistic – we will use [Amazon S3](https://aws.amazon.com/s3/).\n\nThe goal of the article is not to give you a bunch of copy-pasteable snippets.\nThe goal is to show the principles and features of [GitLab CI](/solutions/continuous-integration/) so that you can easily apply them to your technology stack.\n{: .alert .alert-warning}\n\nLet’s start from the beginning. There's no continuous integration (CI) in our story yet.\n\n## At the starting line\n\n**Deployment**: In your case, it means that a bunch of HTML files should appear on your\nS3 bucket (which is already configured for\n[static website hosting](http://docs.aws.amazon.com/AmazonS3/latest/dev/HowDoIWebsiteConfiguration.html?shortFooter=true)).\n\nThere are a million ways to do it. 
We’ll use the\n[awscli](http://docs.aws.amazon.com/cli/latest/reference/s3/cp.html#examples) library,\nprovided by Amazon.\n\nThe full command looks like this:\n\n```shell\naws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\n![Manual deployment](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/13.jpg){: .center}\nPushing code to repository and deploying are separate processes.\n{: .note .text-center}\n\nImportant detail: The command\n[expects you](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#config-settings-and-precedence)\nto provide `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment\nvariables. Also, you might need to specify `AWS_DEFAULT_REGION`.\n{: .alert .alert-info}\n\nLet’s try to automate it using [GitLab CI](/solutions/continuous-integration/).\n\n## The first automated deployment\n\nWith GitLab, it makes no difference what commands you run.\nYou can set up GitLab CI in a way that is tailored to your specific needs, as if it were your local terminal on your computer. Whatever commands you can execute there, you can tell CI to run for you in GitLab.\nPut your script in `.gitlab-ci.yml` and push your code – that’s it: CI triggers\na _job_ and your commands are executed.\n\nNow, let's add some context to our story: Our website is small, there are 20-30 daily\nvisitors, and the code repository has only one default branch: `main`.\n\nLet's start by specifying a _job_ with the command from above in the `.gitlab-ci.yml` file:\n\n```yaml\ndeploy:\n  script: aws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\nNo luck:\n![Failed command](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/fail1.png){: .shadow}\n\nIt is our _job_ to ensure that there is an `aws` executable.\nTo install `awscli` we need `pip`, which is a tool for Python package installation.\nLet's specify a Docker image with preinstalled Python, which should contain `pip` as well:\n\n```yaml\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\n![Automated deployment](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/14.jpg){: .center}\nYou push your code to GitLab, and it is automatically deployed by CI.\n{: .note .text-center}\n\nThe installation of `awscli` extends the job execution time, but that is not a big\ndeal for now. If you need to speed up the process, you can always [look for\na Docker image](https://hub.docker.com/explore/) with preinstalled `awscli`,\nor create an image yourself.\n{: .alert .alert-warning}\n\nAlso, let’s not forget about these environment variables, which you've just grabbed\nfrom the [AWS Console](https://console.aws.amazon.com/):\n\n```yaml\nvariables:\n  AWS_ACCESS_KEY_ID: \"AKIAIOSFODNN7EXAMPLE\"\n  AWS_SECRET_ACCESS_KEY: \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\nIt should work, but keeping secret keys open, even in a private repository,\nis not a good idea. 
Let's see how to deal with this situation.\n\n### Keeping secret things secret\n\nGitLab has a special place for secret variables: **Settings > CI/CD > Variables**\n\n![Picture of Variables page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/add-variable-updated.png)\n\nWhatever you put there will be turned into **environment variables**.\nChecking the \"Mask variable\" checkbox will obfuscate the variable in job logs. Also, checking the \"Protect variable\" checkbox will export the variable only to pipelines running on protected branches and tags. Users with Owner or Maintainer permissions to a project will have access to this section.\n\nWe could remove the `variables` section from our CI configuration. However, let’s use it for another purpose.\n\n### How to specify and use variables that are not secret\n\nWhen your configuration gets bigger, it is convenient to keep some of the\nparameters as variables at the beginning of your configuration. Especially if you\nuse them in more than one place. Although it is not the case in our situation yet,\nlet's set the S3 bucket name as a [**variable**](https://docs.gitlab.com/ee/ci/variables/) for the purpose of this demonstration:\n\n```yaml\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\nSo far so good:\n\n![Successful build](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/build.png){: .shadow.medium.center}\n\nIn our hypothetical scenario, the audience of your website has grown, so you've hired a developer to help you.\nNow you have a team. Let's see how teamwork changes the GitLab CI workflow.\n\n## How to use GitLab CI with a team\n\nNow that there are two users working in the same repository, it is no longer convenient\nto use the `main` branch for development. You decide to use separate branches\nfor both new features and new articles and merge them into `main` when they are ready.\n\nThe problem is that your current CI config doesn’t care about branches at all.\nWhenever you push anything to GitLab, it will be deployed to S3.\n\nPreventing this problem is straightforward. Just add an `only` rule for `main` to your `deploy` job.\n\n![Automated deployment of main branch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/15-updated.png){: .center}\nYou don't want to deploy every branch to the production website, but it would also be nice to preview your changes from feature branches somehow.\n{: .note .text-center}\n\n### How to set up a separate place for testing code\n\nThe person you recently hired, let's call him Patrick, reminds you that there is a feature called\n[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/). 
It looks like a perfect candidate for\na place to preview your work in progress.\n\nTo [host websites on GitLab Pages](/blog/gitlab-pages-setup/), your CI configuration file should satisfy three simple rules:\n\n- The _job_ should be named `pages`\n- There should be an `artifacts` section with the folder `public` in it\n- Everything you want to host should be in this `public` folder\n\nThe contents of the public folder will be hosted at `http://\u003Cusername>.gitlab.io/\u003Cprojectname>/`\n{: .alert .alert-info}\n\nAfter applying the [example config for plain-html websites](https://gitlab.com/pages/plain-html/blob/master/.gitlab-ci.yml),\nthe full CI configuration looks like this:\n\n```yaml\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\n\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n  only:\n  - main\n\npages:\n  image: alpine:latest\n  script:\n  - mkdir -p ./public\n  - cp ./*.html ./public/\n  artifacts:\n    paths:\n    - public\n  except:\n  - main\n```\n\nWe specified two jobs. One job deploys the website for your customers to S3 (`deploy`).\nThe other one (`pages`) deploys the website to GitLab Pages.\nWe can name them \"Production environment\" and \"Staging environment\", respectively.\n\n![Deployment to two places](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/16-updated.png){: .center}\nAll branches, except main, will be deployed to GitLab Pages.\n{: .note .text-center}\n\n## Introducing environments\n\nGitLab offers\n[support for environments](https://docs.gitlab.com/ee/ci/environments/) (including dynamic environments and static environments),\nand all you need to do is specify the corresponding environment for each deployment *job*:\n\n```yaml\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\n\ndeploy to production:\n  environment: production\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n  only:\n  - main\n\npages:\n  image: alpine:latest\n  environment: staging\n  script:\n  - mkdir -p ./public\n  - cp ./*.html ./public/\n  artifacts:\n    paths:\n    - public\n  except:\n  - main\n```\n\nGitLab keeps track of your deployments, so you always know what is currently being deployed on your servers:\n\n![List of environments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/envs-updated.png){: .shadow.center}\n\nGitLab provides a full history of your deployments for each of your current environments:\n\n![List of deployments to staging environment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/staging-env-detail-updated.png){: .shadow.center}\n\n![Environments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/17-updated.png){: .center}\n\nNow, with everything automated and set up, we’re ready for the new challenges that are just around the corner.\n\n## How to troubleshoot deployments\n\nIt has just happened again.\nYou've pushed your feature branch to preview it on staging, and a minute later Patrick pushed\nhis branch, so the staging environment was rewritten with his work. Aargh!! It was the third time today!\n\nIdea! 
\u003Ci class=\"far fa-lightbulb\" style=\"color:#FFD900; font-size:.85em\" aria-hidden=\"true\">\u003C/i> Let's use Slack to notify us of deployments, so that people will not push their stuff if another one has been just deployed!\n\n> Learn how to [integrate GitLab with Slack](https://docs.gitlab.com/ee/user/project/integrations/gitlab_slack_application.html).\n\n## Teamwork at scale\n\nAs the time passed, your website became really popular, and your team has grown from two people to eight people.\nPeople develop in parallel, so the situation when people wait for each other to\npreview something on Staging has become pretty common. \"Deploy every branch to staging\" stopped working.\n\n![Queue of branches for review on Staging](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/queue.jpg){: .center}\n\nIt's time to modify the process one more time. You and your team agreed that if\nsomeone wants to see their changes on the staging\nserver, they should first merge the changes to the \"staging\" branch.\n\nThe change of `.gitlab-ci.yml` is minimal:\n\n```yaml\nexcept:\n- main\n```\n\nis now changed to\n\n```yaml\nonly:\n- staging\n```\n\n![Staging branch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/18-updated.png){: .center}\nPeople have to merge their feature branches before preview on the staging server.\n{: .note .text-center}\n\nOf course, it requires additional time and effort for merging, but everybody agreed that it is better than waiting.\n\n### How to handle emergencies\n\nYou can't control everything, so sometimes things go wrong. Someone merged branches incorrectly and\npushed the result straight to production exactly when your site was on top of HackerNews.\nThousands of people saw your completely broken layout instead of your shiny main page.\n\nLuckily, someone found the **Rollback** button, so the\nwebsite was fixed a minute after the problem was discovered.\n\n![List of environments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/prod-env-rollback-arrow-updated.png){: .shadow.center}\nRollback relaunches the previous job with the previous commit\n{: .note .text-center}\n\nAnyway, you felt that you needed to react to the problem and decided to turn off\nauto-deployment to Production and switch to manual deployment.\nTo do that, you needed to add `when: manual` to your _job_.\n\nAs you expected, there will be no automatic deployment to Production after that.\nTo deploy manually go to **CI/CD > Pipelines**, and click the button:\n\n![Skipped job is available for manual launch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/manual-pipeline-arrow-updated.png){: .shadow.center}\n\nFast forward in time. Finally, your company has turned into a corporation. Now, you have hundreds of people working on the website,\nso all the previous compromises no longer work.\n\n### Time to start using Review Apps\n\nThe next logical step is to boot up a temporary instance of the application per feature branch for review.\n\nIn our case, we set up another bucket on S3 for that. 
The only difference is that\nwe copy the contents of our website to a \"folder\" with the name of the\ndevelopment branch, so that the URL looks like this:\n\n`http://\u003CREVIEW_S3_BUCKET_NAME>.s3-website-us-east-1.amazonaws.com/\u003Cbranchname>/`\n\nHere's the replacement for the `pages` _job_ we used before:\n\n```yaml\nreview apps:\n  variables:\n    S3_BUCKET_NAME: \"reviewbucket\"\n  image: python:latest\n  environment: review\n  script:\n  - pip install awscli\n  - mkdir -p ./$CI_BUILD_REF_NAME\n  - cp ./*.html ./$CI_BUILD_REF_NAME/\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\nThe interesting thing is where we got this `$CI_BUILD_REF_NAME` variable from.\nGitLab predefines [many environment variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) so that you can use them in your jobs.\n\nNote that we defined the `S3_BUCKET_NAME` variable inside the *job*. You can do this to override top-level definitions.\n{: .alert .alert-info}\n\nVisual representation of this configuration:\n\n![Review apps](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/19-updated.png){: .illustration}\n\nThe details of the Review Apps implementation vary widely, depending on your real technology\nstack and on your deployment process, which is outside the scope of this blog post.\n\nIt will not be as straightforward as it is with our static HTML website.\nFor example, you would have to make these instances temporary, and booting up these instances\nwith all required software and services automatically on the fly is not a trivial task.\nHowever, it is doable, especially if you use Docker containers, or at least Chef or Ansible.\n\nWe'll cover deployment with Docker in a future blog post.\nI feel a bit guilty for simplifying the deployment process to simply copying HTML files, and not\nadding some hardcore scenarios. If you need some right now, I recommend you read the article [\"Building an Elixir Release into a Docker image using GitLab CI.\"](/blog/building-an-elixir-release-into-docker-image-using-gitlab-ci-part-1/)\n\nFor now, let's talk about one final thing.\n\n### Deploying to different platforms\n\nIn real life, we are not limited to S3 and GitLab Pages. 
We host, and therefore\ndeploy, our apps and packages to various services.\n\nMoreover, at some point, you could decide to move to a new platform and would need to rewrite all your deployment scripts.\nYou can use a gem called `dpl` to minimize the damage.\n\nIn the examples above, we used `awscli` as a tool to deliver code to an example\nservice (Amazon S3).\nHowever, no matter what tool and what destination system you use, the principle is the same:\nYou run a command with some parameters and somehow pass a secret key for authentication purposes.\n\nThe `dpl` deployment tool utilizes this principle and provides a\nunified interface for [this list of providers](https://github.com/travis-ci/dpl#supported-providers).\n\nHere's how a production deployment _job_ would look if we use `dpl`:\n\n```yaml\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\n\ndeploy to production:\n  environment: production\n  image: ruby:latest\n  script:\n  - gem install dpl\n  - dpl --provider=s3 --bucket=$S3_BUCKET_NAME\n  only:\n  - main\n```\n\nIf you deploy to different systems or change destination platforms frequently, consider\nusing `dpl` to make your deployment scripts look uniform.\n\n## Five key takeaways\n\n1. Deployment is just a command (or a set of commands) that is regularly executed. Therefore, it can run inside GitLab CI.\n2. Most of the time, you'll need to provide some secret key(s) to the command you execute. Store these secret keys in **Settings > CI/CD > Variables**.\n3. With GitLab CI, you can flexibly specify which branches to deploy to.\n4. If you deploy to multiple environments, GitLab will keep the history of deployments,\nwhich allows you to roll back to any previous version.\n5. For critical parts of your infrastructure, you can enable manual deployment from the GitLab interface, instead of automated deployment.\n\n\u003Cstyle>\nimg.illustration {\n  padding-left: 12%;\n  padding-right: 12%;\n\n}\n@media (max-width: 760px) {\n  img.illustration {\n    padding-left: 0px;\n    padding-right: 0px;\n  }\n}\n\u003C/style>\n",[832,937,726],"2024-07-22",{"slug":4401,"featured":6,"template":678},"ci-deployment-and-environments","content:en-us:blog:ci-deployment-and-environments.yml","Ci Deployment And Environments","en-us/blog/ci-deployment-and-environments.yml","en-us/blog/ci-deployment-and-environments",{"_path":4407,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4408,"content":4414,"config":4420,"_id":4422,"_type":16,"title":4423,"_source":17,"_file":4424,"_stem":4425,"_extension":20},"/en-us/blog/how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings",{"title":4409,"description":4410,"ogTitle":4409,"ogDescription":4410,"noIndex":6,"ogImage":4411,"ogUrl":4412,"ogSiteName":692,"ogType":693,"canonicalUrls":4412,"schema":4413},"How we automatically fixed thousands of Ruby 2.7 deprecation warnings","The upgrade to Ruby 2.7 for GitLab involved thousands of deprecation warnings across hundreds of files. 
Here's how we fixed most of them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681882/Blog/Hero%20Images/daria-nepriakhina-zNU3ErDAbAw-unsplash.jpg","https://about.gitlab.com/blog/how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we automatically fixed thousands of Ruby 2.7 deprecation warnings\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thong Kuah\"}],\n        \"datePublished\": \"2021-02-03\",\n      }",{"title":4409,"description":4410,"authors":4415,"heroImage":4411,"date":4417,"body":4418,"category":14,"tags":4419},[4416],"Thong Kuah","2021-02-03","\n\nRuby 3.0 was just released on Dec. 25, 2020, with some new features and some breaking changes.\nGitLab was at Ruby 2.6, and we wanted to upgrade to Ruby 2.7 in preparation to eventually upgrade to Ruby 3.\n\nIn Ruby 3.0, [positional and keyword arguments will be separated](https://www.ruby-lang.org/en/news/2019/12/12/separation-of-positional-and-keyword-arguments-in-ruby-3-0/). To help developers prepare for this, warnings were added in Ruby 2.7. In GitLab, we discovered we have [thousands\nof such warnings](https://gitlab.com/gitlab-org/gitlab/-/issues/257438) across hundreds of files:\n\n```\nwarning: Using the last argument as keyword parameters is deprecated; maybe ** should be added to the call\n```\n\n## Boring solutions\n\nTo address this warning, the obvious and boring solution was to simply add `**` to the last keyword argument.\nFor the most part, this is what we did. However, while this was underway, we also developed a RuboCop check that could\ndetect and automatically fix the keyword arguments. The benefit of this approach was that we could\n[autocorrect](https://docs.rubocop.org/rubocop/usage/auto_correct.html) any existing warnings en masse.\n\nThe tricky part about this is that RuboCop is designed to statically analyze Ruby code, whereas the warnings were\ngenerated by Ruby at runtime.\n\n## A way forward\n\nAfter some research, we found a way to utilize our comprehensive RSpec test suite to\ngather all the warnings using the [Deprecation Toolkit gem](https://github.com/shopify/deprecation_toolkit). We also\nconsidered using the [warning gem](https://github.com/jeremyevans/ruby-warning) at one point, but preferred Deprecation Toolkit\nas the results were easier to process.\n\nDeprecation Toolkit supports RSpec out of the box, so it was really simple to configure. It also has a simple YAML-based file format to record all deprecations. 
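For a rough idea, each recorded file maps a test name to the warnings it triggered; the shape is approximately the following (a sketch from memory: the test name and message are made up, and the exact layout is an assumption rather than the gem's documented spec):\n\n```yaml\n---\ntest_user_creation_assigns_defaults:\n- |\n  DEPRECATION WARNING: Using the last argument as keyword parameters is deprecated\n```\n\n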
We then adapted this to record the Ruby 2.7\nlast keyword argument deprecation warnings with:\n\n```ruby\n  kwargs_warnings = [\n    # Taken from https://github.com/jeremyevans/ruby-warning/blob/1.1.0/lib/warning.rb#L18\n    %r{warning: (?:Using the last argument (?:for `.+' )?as keyword parameters is deprecated; maybe \\*\\* should be added to the call|Passing the keyword argument (?:for `.+' )?as the last hash parameter is deprecated|Splitting the last argument (?:for `.+' )?into positional and keyword parameters is deprecated|The called method (?:`.+' )?is defined here)\\n\\z}\n  ]\n  DeprecationToolkit::Configuration.warnings_treated_as_deprecation = kwargs_warnings\n```\n\nLastly, we wrote a new RuboCop check, called\n[`Lint/LastKeywordArgument`](https://gitlab.com/gitlab-org/gitlab/-/blob/632b7768f7f9014951170a006489d66b34001c68/rubocop/cop/lint/last_keyword_argument.rb),\nthat checks against the YAML files generated by Deprecation Toolkit, and\ngenerates offenses. Now we can very quickly statically check the whole GitLab\ncodebase, and even autocorrect! You can see how [Deprecation Toolkit and the\n`LastKeywordArgument` check were put together in this merge\nrequest](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47720). You can\nsee a sample output from running the `LastKeywordArgument` cop check:\n\n![LastKeywordArgument RuboCop offenses](https://about.gitlab.com/images/blogimages/last-keyword-argument-rubocop-offenses.png){: .shadow.center}\nSample output from running the `LastKeywordArgument` cop check\n{: .note .text-center}\n\n## Automatically fix everything\n\nNow that we have an automatic RuboCop check, which can also autocorrect, we can create merge requests to autocorrect!\nFor example, we autocorrected 62 instances across [39 spec files](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/48407).\nAutomation for the win!\n\nWe then went one step further and integrated this into our GitLab CI pipelines. Using the `artifacts` feature of GitLab CI, we\ngathered the `deprecations` directory from all RSpec jobs (we have about 400 such jobs). After all the RSpec jobs have passed, we then made a `post-test` job to\n[check the results](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/49792) with the `LastKeywordArgument` cop. Below is a\nsnippet of the GitLab CI `.gitlab-ci.yml` configuration:\n\n```yaml\nstages:\n  - test\n  - post-test\n\n# This inherited job is used by all RSpec jobs\n.rspec-base:\n  stage: test\n  artifacts:\n    paths:\n      - deprecations/\n\n# GitLab CI job artifacts from previous stages are passed to this job\nrspec:deprecations:\n  stage: post-test\n  script:\n    - bundle exec rubocop --only Lint/LastKeywordArgument --parallel\n  artifacts:\n    paths:\n      - deprecations/\n```\n\nThis enabled us to have a single job where [we can see all deprecation warnings](https://gitlab.com/gitlab-org/gitlab/-/jobs/991299621).\n\n## Conclusion\n\nWith this measure, we went from about 30,000 warnings related to keyword arguments to about 800 remaining warnings, largely stemming from\ndependencies. 
Feel free to follow our progress in [GitLab issue #257438](https://gitlab.com/gitlab-org/gitlab/-/issues/257438), and contribute to\nfixing the remaining warnings if you are interested!\n\nCover image by [Daria Nepriakhina](https://unsplash.com/@epicantus) on [Unsplash](https://unsplash.com/photos/zNU3ErDAbAw)\n{: .note}\n",[703],{"slug":4421,"featured":6,"template":678},"how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings","content:en-us:blog:how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings.yml","How We Automatically Fixed Hundreds Of Ruby 2 7 Deprecation Warnings","en-us/blog/how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings.yml","en-us/blog/how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings",{"_path":4427,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4428,"content":4434,"config":4442,"_id":4444,"_type":16,"title":4445,"_source":17,"_file":4446,"_stem":4447,"_extension":20},"/en-us/blog/we-need-to-talk-no-proxy",{"title":4429,"description":4430,"ogTitle":4429,"ogDescription":4430,"noIndex":6,"ogImage":4431,"ogUrl":4432,"ogSiteName":692,"ogType":693,"canonicalUrls":4432,"schema":4433},"We need to talk: Can we standardize NO_PROXY?","Subtle differences in proxy setting implementations led to surprising problems for a GitLab customer. Here's how we got to the root of it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659507/Blog/Hero%20Images/AdobeStock_623844718.jpg","https://about.gitlab.com/blog/we-need-to-talk-no-proxy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We need to talk: Can we standardize NO_PROXY?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2021-01-27\",\n      }",{"title":4429,"description":4430,"authors":4435,"heroImage":4431,"date":4436,"updatedDate":4437,"body":4438,"category":14,"tags":4439},[670],"2021-01-27","2025-06-09","If you've used a Web proxy server before, you're probably familiar with\nthe environment variables `http_proxy` or `HTTP_PROXY`. You may be less\nfamiliar with `no_proxy`, which provides a way to exclude traffic\ndestined for certain hosts from using the proxy. While HTTP is a\nwell-defined standard, no standard exists for how clients should handle\nthese variables. As a result, Web clients support these variables in\nsubtly different ways. For one GitLab customer, these differences led\nto a weekend of troubleshooting that uncovered why certain services\nstopped communicating.\n\n## What is a proxy server?\n\nA proxy server acts as an intermediary between your computer or network and the internet. When you send a request to access a website or other online resource, that request first goes to the proxy server. The proxy server then forwards the request to the actual destination and delivers the response back to you. 
Proxies can serve various purposes, including improving security, enhancing privacy, and controlling internet usage.\n\n## Proxy server environment variables\n\nLet's now look at what proxy server environment variables are, and how to define exemptions and handle exclusions with `no_proxy`.\n\n### Understanding proxy server environment variables\n\nToday, most Web clients support connecting to proxy servers via\nenvironment variables:\n\n- `http_proxy` / `HTTP_PROXY`\n- `https_proxy` / `HTTPS_PROXY`\n- `no_proxy` / `NO_PROXY`\n\nThese variables tell the client what URL should be used to access the\nproxy servers and which exceptions should be made. For example, if you\nhad a proxy server listening on `http://alice.example.com:8080`, you\nmight use it via:\n\n```sh\nexport http_proxy=http://alice.example.com:8080\n```\n\nWhich proxy server gets used if troublesome Bob also defines the\nall-caps version, `HTTP_PROXY`?\n\n```sh\nexport HTTP_PROXY=http://bob.example.com:8080\n```\n\nThe answer surprised us: it depends. In some cases, the Alice proxy\nwins, and in other cases Bob wins. We'll discuss the details later.\n\n### Defining proxy exemptions with `no_proxy`\n\nWhat happens if you want to make exceptions? For example, suppose you\nwant to use a proxy server for everything but `internal.example.com` and\n`internal2.example.com`. That's where the `no_proxy` variable comes into\nplay. In that case, you would define `no_proxy` as follows:\n\n```sh\nexport no_proxy=internal.example.com,internal2.example.com\n```\n\n### Handling IP exclusions in `no_proxy`\n\nWhat if you want to exclude IP addresses? Can you use asterisks or\nwildcards? Can you use CIDR blocks (e.g. `192.168.1.1/32`)? The answer\nagain: it depends.\n\n## How did we get here?\n\nLet's dig into the evolution of proxy variables, and how they are used today.\n\n### The origins of proxy variables\n\nIn 1994, most Web clients used CERN's `libwww`, which [supported the `http_proxy` and `no_proxy` environment variables](https://courses.cs.vt.edu/~cs4244/spring.09/documents/Proxies.pdf).\n`libwww` only used the lowercase form of `http_proxy`, and the [`no_proxy` syntax was\nsimple](https://github.com/w3c/libwww/blob/8678b3dcb4191065ca39caea54bb1beba809a617/Library/src/HTAccess.c#L234-L239):\n\n```\nno_proxy is a comma- or space-separated list of machine\nor domain names, with optional :port part.  If no :port\npart is present, it applies to all ports on that domain.\n\nExample:\n\t\tno_proxy=\"cern.ch,some.domain:8001\"\n```\n\nNew clients emerged that added their own HTTP implementations without\nlinking `libwww`. In January 1996, Hrvoje Niksic released\n`geturl`, the predecessor of what is now `wget`. A month later,\n`geturl` [added support for `http_proxy` in v1.1](https://ftp.sunet.se/mirror/archive/ftp.sunet.se/pub/www/utilities/wget/old-versions/).\nIn May 1996, `geturl` v1.3 added support for `no_proxy`. Just as with\n`libwww`, `geturl` only supported the lowercase form.\n\nIn January 1998, Daniel Stenberg released `curl` v5.1, which [supported the `http_proxy` and `no_proxy` variables](https://github.com/curl/curl/blob/ae1912cb0d494b48d514d937826c9fe83ec96c4d/CHANGES#L929-L944).\nIn addition, `curl` allowed the uppercase forms, `HTTP_PROXY` and `NO_PROXY`.\n\nPlot twist: In March 2009, [curl v7.19.4](https://github.com/curl/curl/releases/tag/curl-7_19_4) dropped support for the\nuppercase form of `HTTP_PROXY` [due to security concerns](https://github.com/curl/curl/blob/30e7641d7d2eb46c0b67c0c495a0ea7e52333ee2/lib/url.c#L2250-L2261). 
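You can check this behavior yourself. A small sketch in the spirit of the examples later in this post (the proxy host is deliberately unresolvable, so a request fails only if the variable is actually honored; output abridged):\n\n```sh\n# Uppercase HTTP_PROXY is ignored by curl for plain HTTP: the request goes direct\nenv HTTP_PROXY=http://non.existent:8080/ curl -I http://example.com\n\n# Lowercase http_proxy is honored: curl tries, and fails, to resolve the proxy\nenv http_proxy=http://non.existent:8080/ curl -I http://example.com\n# curl: (5) Could not resolve proxy: non.existent\n```\n\n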
However, while `curl` ignores `HTTP_PROXY`, `HTTPS_PROXY` still works today.\n\n### State of the variables today\n\nFast-forward to today. As my [colleague Nourdin el Bacha researched](https://gitlab.com/gitlab-com/support/support-team-meta/-/issues/2991),\nwe can see that how these proxy server variables are handled varies, depending\non what language or tool you are using.\n\n## Current implementation of proxy variables across languages\n\nKnowing how proxy variables are handled across languages allows you to set them so that they work properly. Here’s a quick rundown.\n\n### `http_proxy` and `https_proxy`\n\nIn the following table, each row represents a supported behavior, while\neach column holds the tool (e.g. `curl`) or language (e.g. `Ruby`) to\nwhich it applies:\n\n|                 | curl      | wget           | Ruby          | Python    | Go        |\n|-----------------|-----------|----------------|---------------|-----------|-----------|\n| `http_proxy`    | Yes       | Yes            | Yes           | Yes       | Yes       |\n| `HTTP_PROXY`    | No        | No             | Yes ([warning](https://github.com/ruby/ruby/blob/0ed71b37fa9af134fdd5a7fd1cebd171eba83541/lib/uri/generic.rb#L1519)) | Yes (if `REQUEST_METHOD` not in env)       | Yes       |\n| `https_proxy`   | Yes       | Yes            | Yes           | Yes       | Yes       |\n| `HTTPS_PROXY`   | Yes       | No             | Yes           | Yes       | Yes       |\n| Case precedence | lowercase | lowercase only | lowercase     | lowercase | Uppercase |\n| Reference       | [source](https://github.com/curl/curl/blob/30e7641d7d2eb46c0b67c0c495a0ea7e52333ee2/lib/url.c#L2250-L2266) | [source](https://github.com/jay/wget/blob/099d8ee3da3a6eea5635581ae517035165f400a5/src/retr.c#L1222-L1239) | [source](https://github.com/ruby/ruby/blob/0ed71b37fa9af134fdd5a7fd1cebd171eba83541/lib/uri/generic.rb#L1474-L1543) | [source](https://github.com/python/cpython/blob/030a713183084594659aefd77b76fe30178e23c8/Lib/urllib/request.py#L2488-L2517) | [source](https://github.com/golang/go/blob/682a1d2176b02337460aeede0ff9e49429525195/src/vendor/golang.org/x/net/http/httpproxy/proxy.go#L82-L97) |\n\u003Cbr>\u003C/br>\nNote that `http_proxy` and `https_proxy` are always supported across the\nboard, while `HTTP_PROXY` is not always supported. Python (via `urllib`) complicates\nthe picture even more: `HTTP_PROXY` can be used [as long as\n`REQUEST_METHOD` is not defined in the environment](https://github.com/python/cpython/blob/030a713183084594659aefd77b76fe30178e23c8/Lib/urllib/request.py#L2504-L2508).\n\nWhile you might expect environment variables to be all-caps,\n`http_proxy` came first, so that's the de facto standard. When in doubt,\ngo with the lowercase form because that's universally supported.\n\nInstead of environment variables, Java uses [system properties](https://docs.oracle.com/javase/8/docs/technotes/guides/net/proxies.html). This avoids case issues entirely.\n\nUnlike most implementations, Go tries the uppercase version before\nfalling back to the lowercase version. We will see later why that caused\nissues for one GitLab customer.\n\n### `no_proxy` format\n\nSome users have [discussed the lack of the `no_proxy` specification in this issue](https://github.com/curl/curl/issues/1208). As\n`no_proxy` specifies an exclusion list, many questions arise about\nhow it behaves. 
### `no_proxy` format\n\nSome users have [discussed the lack of a `no_proxy` specification in this issue](https://github.com/curl/curl/issues/1208). As\n`no_proxy` specifies an exclusion list, many questions arise about\nhow it behaves. For example, suppose your `no_proxy` configuration is defined as follows:\n\n```sh\nexport no_proxy=example.com\n```\n\nDoes this mean that the domain must be an exact match, or will\n`subdomain.example.com` also match against this configuration? The\nfollowing table shows the state of various implementations. It turns out\nthat, Java aside, all implementations match suffixes properly, as shown in the\n`Matches suffixes?` row:\n\n|                       | curl      | wget           | Ruby      | Python    | Go        | Java |\n|-----------------------|-----------|----------------|-----------|-----------|-----------|------|\n| `no_proxy`            | Yes       | Yes            | Yes       | Yes       | Yes       | No*  |\n| `NO_PROXY`            | Yes       | No             | Yes       | Yes       | Yes       | No*  |\n| Case precedence       | lowercase | lowercase only | lowercase | lowercase | uppercase | N/A  |\n| Matches suffixes?     | Yes       | Yes            | Yes       | Yes       | Yes       | No   |\n| Strips leading `.`?   | Yes       | No             | Yes       | Yes       | No        | No   |\n| `*` matches all hosts?| Yes       | No             | No        | Yes       | Yes       | Yes  |\n| Supports regexes?     | No        | No             | No        | No        | No        | No   |\n| Supports CIDR blocks? | No        | No             | Yes       | No        | Yes       | No   |\n| Detects loopback IPs? | No        | No             | No        | No        | Yes       | No   |\n| Resolves IP addresses?| No        | No             | Yes       | No        | Yes       | No   |\n| Reference             | [source](https://github.com/curl/curl/blob/30e7641d7d2eb46c0b67c0c495a0ea7e52333ee2/lib/url.c#L2152-L2206) | [source](https://github.com/jay/wget/blob/099d8ee3da3a6eea5635581ae517035165f400a5/src/retr.c#L1266-L1274) | [source](https://github.com/ruby/ruby/blob/eead83160bcc5f49706e05669e5a7e2620b9b605/lib/uri/generic.rb#L1552-L1577) | [source](https://github.com/python/cpython/blob/030a713183084594659aefd77b76fe30178e23c8/Lib/urllib/request.py#L2519-L2551) | [source](https://github.com/golang/go/blob/master/src/vendor/golang.org/x/net/http/httpproxy/proxy.go#L170-L205) | [documentation](https://docs.oracle.com/javase/8/docs/technotes/guides/net/proxies.html) |\n\n\\* Java uses the `http.nonProxyHosts` system property.\n\n### The impact of leading dots in `no_proxy`\n\nIf there is a leading `.` in the `no_proxy` setting, the\nbehavior varies. For example, `curl` and `wget` behave\ndifferently. `curl` will always strip the leading `.` and match against\na domain suffix. This call bypasses the proxy:\n\n```sh\n$ env https_proxy=http://non.existent/ no_proxy=.gitlab.com curl https://gitlab.com\n\u003Chtml>\u003Cbody>You are being \u003Ca href=\"https://about.gitlab.com/\">redirected\u003C/a>.\u003C/body>\u003C/html>\n```\n\nHowever, `wget` does not strip the leading `.` and performs an exact\nstring match against a hostname. As a result, `wget` attempts to use the\nproxy when the bare domain itself is requested:\n\n```sh\n$ env https_proxy=http://non.existent/ no_proxy=.gitlab.com wget https://gitlab.com\nResolving non.existent (non.existent)... failed: Name or service not known.\nwget: unable to resolve host address 'non.existent'\n```\n\nNo implementation supports regular expressions. I\nsuspect regexes would complicate matters further, because regexes have\ntheir own flavors (e.g. PCRE, POSIX, etc.), and using them would also\nintroduce potential performance and security issues.\n\nIn some cases, setting `no_proxy` to `*` effectively disables proxies\naltogether, but this is not a universal rule: as the table above shows, `wget` and Ruby do not treat `*` specially.
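\nWith `curl`, which does honor the wildcard, you can verify this yourself. A sketch, reusing the unreachable-proxy trick from above:\n\n```sh\n# The wildcard tells curl to bypass the (non-existent) proxy for every host:\nenv https_proxy=http://non.existent/ no_proxy='*' curl -sI https://gitlab.com | head -1\n# e.g. HTTP/2 302 -- the request succeeded without ever touching the proxy\n```\n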
Only Ruby performs a DNS lookup to resolve a hostname to an IP address when deciding if a proxy should be used. Be careful if you use IP addresses with Ruby because it’s possible a hostname may resolve to an excluded IP address. In general, do not specify IP addresses in the `no_proxy` variable unless you expect the IPs to be used explicitly by the client.\n\nThe same holds true for CIDR blocks, such as `18.240.0.1/24`. CIDR\nblocks only work when the request is made directly to an IP\naddress. Only Go and Ruby allow CIDR blocks. Unlike other\nimplementations, Go even automatically disables the use of a proxy if it\ndetects a loopback IP address.\n\n## Why does this matter?\n\nDiscrepancies in proxy environment variable handling, particularly between Ruby and Go, led to a real-world issue for one GitLab customer, where Git pushes worked via the command line but failed in the web UI. Understanding these inconsistencies is crucial for troubleshooting and for configuring applications that span multiple languages on corporate networks that use proxy servers.\n\n### Challenges of defining proxy variables in multi-language applications\n\nIf you have an application written in multiple languages that needs to\nwork behind a corporate firewall with a proxy server, you may need to\npay attention to these differences. For example, GitLab is composed of a\nfew services written in Ruby and Go. One customer set its proxy\nconfiguration to something like the following:\n\n```yaml\nHTTP_PROXY: http://proxy.company.com\nHTTPS_PROXY: http://proxy.company.com\nNO_PROXY: .correct-company.com\n```\n\nThe customer reported the following issue with GitLab:\n\n1. A `git push` from the command line worked\n1. Git changes made via the Web UI failed\n\nOur support engineers discovered that due to a Kubernetes configuration\nissue, a few stale values lingered. The pod actually had an environment\nthat looked something like:\n\n```yaml\nHTTP_PROXY: http://proxy.company.com\nHTTPS_PROXY: http://proxy.company.com\nNO_PROXY: .correct-company.com\nno_proxy: .wrong-company.com\n```\n\n### How inconsistent proxy settings can cause failures\n\nThe inconsistent definitions in `no_proxy` and `NO_PROXY` set off red\nflags, and we could have resolved the issue by making them consistent or\nremoving the incorrect entry. But let's drill into what happened.\nRemember from above that:\n\n1. Ruby tries the lowercase form first\n1. Go tries the uppercase form first\n\nAs a result, services written in Go, such as GitLab Workhorse, had the\ncorrect proxy configuration. A `git push` from the command line worked\nfine because the Go services primarily handled this activity:\n\n```mermaid\nsequenceDiagram\n    participant C as Client\n    participant W as Workhorse\n    participant G as Gitaly\n    C->>W: 1. git push\n    W->>G: 2. gRPC: PostReceivePack\n    G->>W: 3. OK\n    W->>C: 4. OK\n```\n\nThe gRPC call in step 2 never attempted to use the proxy because\n`no_proxy` was configured properly to connect directly to Gitaly.\n\nHowever, when a user makes a change in the UI, Gitaly forwards the\nrequest to a `gitaly-ruby` service, which is written in\nRuby. `gitaly-ruby` makes changes to the repository and [reports back\nvia a gRPC call to its parent process](https://gitlab.com/gitlab-org/gitaly/-/issues/3189). 
However,\nas seen in step 4 below, the reporting step didn't happen:\n\n```mermaid\nsequenceDiagram\n    participant C as Client\n    participant R as Rails\n    participant G as Gitaly\n    participant GR as gitaly-ruby\n    participant P as Proxy\n    C->>R: 1. Change file in UI\n    R->>G: 2. gRPC: UserCommitFiles\n    G->>GR: 3. gRPC: UserCommitFiles\n    GR->>P: 4. CONNECT\n    P->>GR: 5. FAIL\n```\n\nBecause gRPC uses HTTP/2 as the underlying transport, `gitaly-ruby`\nattempted a CONNECT to the proxy since it was configured with the wrong\n`no_proxy` setting. The proxy immediately rejected this HTTP request,\ncausing the failure in the Web UI push case.\n\n### Correcting proxy configuration issues\n\nOnce we eliminated the lowercase `no_proxy` from the environment, pushes\nfrom the UI worked as expected, and `gitaly-ruby` connected directly to\nthe parent Gitaly process. Step 4 worked properly in the diagram below:\n\n```mermaid\nsequenceDiagram\n    participant C as Client\n    participant R as Rails\n    participant G as Gitaly\n    participant GR as gitaly-ruby\n    participant P as Proxy\n    C->>R: 1. Change file in UI\n    R->>G: 2. gRPC: UserCommitFiles\n    G->>GR: 3. gRPC: UserCommitFiles\n    GR->>G: 4. OK\n    G->>R: 5. OK\n    R->>C: 6. OK\n```\n\n## A surprising discovery with gRPC\n\nWe also discovered that gRPC does not [support HTTPS proxies](https://github.com/grpc/grpc/issues/20939). This again subtly affects the behavior of the system depending on how `HTTPS_PROXY` is set.\n\n### gRPC behavior with `HTTPS_PROXY`\n\nNote that the customer set `HTTPS_PROXY` to an unencrypted HTTP proxy:\n`http://` is used instead of `https://`. While this isn't\nideal from a security standpoint, some people do this to avoid the\nhassle of clients failing due to TLS certificate verification issues.\n\nIronically, if an HTTPS proxy had been specified, we would not have seen\nthis problem: gRPC would have ignored the setting entirely, since HTTPS\nproxies are not supported.\n\n### The lowest common denominator\n\nI think we can all agree that one should never define inconsistent\nvalues with lowercase and uppercase proxy settings. However, if you ever\nhave to manage a stack written in multiple languages, you might need to\nconsider setting HTTP proxy configurations to the lowest common\ndenominator (a portable example follows the list below):\n\n1. `http_proxy` and `https_proxy`\n    * Use the lowercase form. `HTTP_PROXY` is not always supported or recommended.\n    * If you _absolutely must_ use the uppercase form as well, be **sure** it shares the same value.\n1. `no_proxy`\n    * Use the lowercase form.\n    * Use comma-separated `hostname:port` values.\n    * IP addresses are okay, but hostnames are never resolved.\n    * Suffixes are always matched (e.g. `example.com` will match `test.example.com`).\n    * If top-level domains need to be matched, avoid using a leading dot (`.`).\n    * Avoid using CIDR matching since only Go and Ruby support it.
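\nPutting those recommendations together, a portable, lowest-common-denominator configuration might look like this sketch (the hostnames are illustrative):\n\n```sh\n# Lowercase variables only, comma-separated hostnames,\n# no leading dots, no wildcards, no CIDR blocks:\nexport http_proxy=http://proxy.example.com:3128\nexport https_proxy=http://proxy.example.com:3128\nexport no_proxy=localhost,127.0.0.1,internal.example.com\n```\n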
## Steps toward standardizing `no_proxy`\n\nKnowing the lowest common denominator can help avoid issues if these\ndefinitions are copied for different Web clients. But should `no_proxy`\nand the other proxy settings have a documented standard rather than an\nad hoc convention? The list below may serve as a starting point for a\nproposal:\n\n1. Prefer lowercase forms over uppercase variables (e.g. `http_proxy` should be searched before `HTTP_PROXY`).\n1. Use comma-separated `hostname:port` values.\n    * Each value may include optional whitespace.\n1. Never perform DNS lookups or use regular expressions.\n1. Use `*` to match **all** hosts.\n1. Strip leading dots (`.`) and match against domain suffixes.\n1. Support CIDR block matching.\n1. Never make assumptions about special IP addresses (e.g. loopback addresses in `no_proxy`).\n\n## Key takeaways on proxy standardization\n\nIt's been over 25 years since the first Web proxy was released. While\nthe basic mechanics of configuring a Web client via environment\nvariables have not changed much, a number of subtleties have emerged\nacross different implementations. We saw how, for one customer, erroneously\ndefining conflicting `no_proxy` and `NO_PROXY` variables led to hours of\ntroubleshooting because Ruby and Go parse these settings differently. We hope\nhighlighting these differences will help you avoid similar issues in your\nproduction stack, and we hope that Web client maintainers will standardize the\nbehavior to avoid such issues in the first place.\n",[268,1508,4440,4441],"user stories","startups",{"slug":4443,"featured":6,"template":678},"we-need-to-talk-no-proxy","content:en-us:blog:we-need-to-talk-no-proxy.yml","We Need To Talk No Proxy","en-us/blog/we-need-to-talk-no-proxy.yml","en-us/blog/we-need-to-talk-no-proxy",{"_path":4449,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4450,"content":4455,"config":4460,"_id":4462,"_type":16,"title":4463,"_source":17,"_file":4464,"_stem":4465,"_extension":20},"/en-us/blog/mr-reviews-with-vs-code",{"title":4451,"description":4452,"ogTitle":4451,"ogDescription":4452,"noIndex":6,"ogImage":2284,"ogUrl":4453,"ogSiteName":692,"ogType":693,"canonicalUrls":4453,"schema":4454},"How to do GitLab merge request reviews in VS Code","Code review is critical to modern software development. We're making it easier by bringing merge request reviews right into VS Code.","https://about.gitlab.com/blog/mr-reviews-with-vs-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to do GitLab merge request reviews in VS Code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2021-01-25\",\n      }",{"title":4451,"description":4452,"authors":4456,"heroImage":2284,"date":4457,"body":4458,"category":14,"tags":4459},[3291],"2021-01-25","\n\nThis post will give you an idea of how VS Code can aid your code review process. You'll get an overview of the features that the GitLab VS Code Extension currently supports, as well as what we plan to introduce in the future.\n\nReviewing merge requests is a core part of GitLab: both the product (since [version 2.0.0](https://gitlab.com/gitlab-org/gitlab/blob/6a3621202e3f7274150862198f59d2579c326650/changelogs/archive.md#L7222), released in 2011) and the company. We recognize that certain review tasks are hard to do just by looking at the diff, and we strive to make them easier. One such task might be looking in the codebase for duplicated code or examples of a particular coding style.\n\nWe decided to aid code reviewers in two ways:\n\n## First way: The GitLab Web IDE\n\nFirst, we introduced the [Web IDE](/blog/introducing-gitlab-s-integrated-development-environment/), which helps our users work [with the codebase in the browser](/direction/create/ide/web_ide/#overview). You can quickly open multiple files, make changes, and commit them. The Web IDE is handy when you need to make a small change, or you don't have the project cloned locally.\n\nThe second way is more recent. 
We always wanted to bring the code review experience closer to code editors, where developers spend a large portion of their time. But the editor market is very fragmented (you find this out the hard way when Emacs and Vim users meet at a party). And it isn't feasible to build GitLab support into all major editors (however, there are plenty of editor plugins maintained by the community[^1]).\n\n## Second way: Bringing code reviews into the editor\n\nRecently, as [VS Code gained a significant user share](https://insights.stackoverflow.com/survey/2019#development-environments-and-tools), it started to make sense to [commit to maintaining the GitLab VS Code extension](/blog/use-gitlab-with-vscode/), which was started as a community project by [Fatih](https://gitlab.com/fatihacet), a GitLab employee at the time. After an initial housekeeping period, we started chipping away at the tasks that will ultimately bring the code review experience into the editor.\n\nIn my previous post, I talked about the great [VS Code Extension API](/blog/vscode-extension-development-with-gitlab/). This API gives extensions almost full control over the editor. When the API introduced commenting functionality two years ago, extensions could start contributing comments to the editor windows. These comments are shown much like comments on a Google Doc. Being able to natively show comments is perfect for reviewing code changes in the editor, and other extensions that provide code reviews are already using this commenting API[^2].\n\n![Merge request review in VS Code](https://about.gitlab.com/images/blogimages/mr-reviews-with-vs-code/full-mr-review-screen.png){: .shadow.medium.center}\nMerge request review in VS Code\n{: .note .text-center}\n\nOver the last few milestones, we started showing MR changes in VS Code and even showing the discussions on them. This means that you can open an MR in your editor and read through the code and comments without switching windows and context. I find this really useful because I can still interact with my editor the way I'm used to, even as I'm reviewing MRs. I can use full-text search to find out if the MR duplicates existing code, or I can open a different test file and compare whether the code style matches.\n\nCurrently, the interaction with MRs is mostly read-only. That means you can see the changes and discussions, but you can't add or change comments, yet[^3]. But even in this current form, you can benefit from having the VS Code functionality so close to your review, especially for the initial understanding of the change.\n\n![VS Code supports Markdown in the comments](https://about.gitlab.com/images/blogimages/mr-reviews-with-vs-code/mr-review-long-comment.png){: .shadow.medium.center}\nVS Code supports Markdown in the comments\n{: .note .text-center}\n\n## What's next\n\nOver the next few milestones, we plan to make commenting as interactive as you know it from the GitLab web interface. We'll start with editing existing comments, adding emoji reactions, and resolving discussion threads. Lastly, we'll implement the full review functionality with creating comments and reviews[^4]. Each [iteration](https://handbook.gitlab.com/handbook/values/#iteration) will make the feature a bit more useful.\n\nI'm excited about the potential to stay in my editor for both creating and reviewing merge requests. I'm already using the current merge request review feature to get an initial understanding of what an MR tries to achieve. I can explore the related code more quickly in my editor. 
If you'd like to help us build the code review feature or just look at the current state of development, visit the [Merge Request Review epic](https://gitlab.com/groups/gitlab-org/-/epics/4607).\n\nYou can check out a walkthrough of our initial proof of concept of merge request reviews in VS Code below:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/kKA6i8oqZAA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n[^1]: [IntelliJ](https://plugins.jetbrains.com/plugin/7447-gitlab-integration-plugin), [Atom](https://atom.io/packages/search?q=gitlab), [vim](https://github.com/shumphrey/fugitive-gitlab.vim), [Emacs](https://github.com/nlamirault/emacs-gitlab), ...\n[^2]: [Jira and Bitbucket](https://marketplace.visualstudio.com/items?itemName=Atlassian.atlascode), [GitHub Pull Requests and Issues](https://marketplace.visualstudio.com/items?itemName=GitHub.vscode-pull-request-github)\n[^3]: You can work around that by using the MR overview and commenting there.\n[^4]: [MR review: interacting with existing comments - POC](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/269) and [MR review: new comments and reviews POC](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/293) represent the initial investigation.\n\n[Cover image](https://art.ljubicapetkovic.com/cc-licensed/) by [Ljubica Petkovic](https://art.ljubicapetkovic.com), licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)\n{: .note}\n",[1084,232,726],{"slug":4461,"featured":6,"template":678},"mr-reviews-with-vs-code","content:en-us:blog:mr-reviews-with-vs-code.yml","Mr Reviews With Vs Code","en-us/blog/mr-reviews-with-vs-code.yml","en-us/blog/mr-reviews-with-vs-code",{"_path":4467,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4468,"content":4474,"config":4480,"_id":4482,"_type":16,"title":4483,"_source":17,"_file":4484,"_stem":4485,"_extension":20},"/en-us/blog/high-availability-git-storage-with-praefect",{"title":4469,"description":4470,"ogTitle":4469,"ogDescription":4470,"noIndex":6,"ogImage":4471,"ogUrl":4472,"ogSiteName":692,"ogType":693,"canonicalUrls":4472,"schema":4473},"Meet Praefect: The traffic manager making your Git data highly available","This router and transaction manager ensures there are multiple copies of each Git repository available in the event of an outage – no NFS required.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669204/Blog/Hero%20Images/traffic-intersection.jpg","https://about.gitlab.com/blog/high-availability-git-storage-with-praefect","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet Praefect: The traffic manager making your Git data highly available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Zeger-Jan van de Weg\"}],\n        \"datePublished\": \"2021-01-21\",\n      }",{"title":4469,"description":4470,"authors":4475,"heroImage":4471,"date":4477,"body":4478,"category":14,"tags":4479},[4476],"Zeger-Jan van de Weg","2021-01-21","\nAs critical software projects grow, scaling infrastructure to make the service [highly available](https://en.wikipedia.org/wiki/High_availability) is key. At GitLab, our biggest struggle in scaling was right in our name: Git.\n\n## The trouble with scaling Git\n\nGit is software that is distributed, but not usually run in a ‘highly available cluster,’ which is what GitLab needs. 
At first, we solved this with a [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions), NFS – which exposes a shared filesystem across multiple machines and generally worked. As we’d soon find out, most NFS appliances were for bulk storage and not fast enough. This made GitLab’s Git access slow.\n\nTo solve the speed problem, we built [Gitaly, our service that provides high-level RPC access to Git repositories](https://docs.gitlab.com/ee/administration/gitaly/). \n\nWhen we started with [Gitaly v1.0](/blog/the-road-to-gitaly-1-0/), our goal was to remove the need for network-attached filesystem access to Git data. When that was complete, the next problem to tackle was that all your data is only stored once. So, if you have a server down, or your hard disk dies, or something happens to this one copy, you're in deep trouble until a backup is restored. This is an issue for GitLab.com, but it’s also a big risk for our customers and community.\n\nBack at our [Summit in Cape Town](/company/culture/contribute/previous/#summit-in-cape-town-south-africa) in 2018, the Gitaly team (at the time, that was [Jacob Vosmaer](/company/team/?department=all#jacobvosmaer-gitlab) and me) and some other engineers discussed pursuing a fault-tolerant, highly available system for Git data. For about a month we went back and forth about how we would go about it – ranging from wild ideas to smaller iterations towards what we want. The challenge here was that the ultimate aim is always going to be 100% availability, but you’re never going to make that. So let's aim for a lot of nines (three nines being 99.9%, five being 99.999%, etc.). Ideally, we'd be able to iterate to 10 nines if we wanted to. \n\nEventually we chose the design of a proxy: introduce a new component in the GitLab architecture, which is Praefect, and then route all the traffic through it to Gitaly storage nodes to provide a [Gitaly Cluster](https://docs.gitlab.com/ee/administration/gitaly/praefect.html). Praefect inspects the request and tries to route it to the right Gitaly backend, checks that Gitaly is up, makes sure the copies of your data are up to date, and so on. \n\n## First iteration: Eventual consistency\n\nTo cut the scope, for our first iterations we settled on eventual consistency, which is fairly common – we even use it for some GitLab features. With Git data, if we are behind a minute, it's not a big deal because at GitLab at least 90% of operations on our Git data are just reads, compared to a very small volume of writes. If I run `git pull` and I'm one commit behind master, that's not ideal, but not a deal breaker in most cases. \n\nWith eventual consistency, each repository gets three copies: one primary and two secondaries. We replicate your data from the primary to the other copies, so that if your primary is inaccessible, we can at least give you read access to the secondary copies until we recover the primary. There’s a chance the secondaries are one or two commits behind your primary, but it’s better than no access.\n\nWe rolled this out in [13.0](/releases/2020/05/22/gitlab-13-0-released/#gitaly-cluster-for-high-availability-git-storage) as generally available. \n\n## Strong consistency\n\nThe next stage was to work on strong consistency, where all of your three copies are always up to date. 
\n\nWhen you write to your Git repository, there’s a moment where Praefect says, “OK, I'm going to update branch A from #abc to #cbd.” If all three copies agree on the updates, then Praefect tells everyone to apply this update and now, almost at the same moment in time, they'll update the data to the same thing. Now you've got three copies that are up to date.\n\nSo, if one copy is offline for some reason – let’s say a network partition, or the disk is corrupted – we can serve from the other two copies. Then the data remains available, and you have more time to recover the third copy as an admin. Effectively, while you always have a designated primary, it's actually more like having _three_ primaries, because they are all in the same state. \n\nIf the default state of a system is consistent it requires maintaining this consistency on each mutation to the data that's performed. All possible requests to Gitaly are grouped into two classes: mutators and accessors. Meaning that there was a risk we had to migrate each mutator RPC individually. That would've been a major effort, and if possible, we wanted to push this problem to Git. Gitaly uses Git for the majority of write operations, and was thus the largest common denominator.\n\nSo Git had to become aware of transactions, which ideally isn't part of Git. There are more areas where it would be nice if Git was aware of business logic, but if we're honest with ourselves, it's not really Git's concern: authentication and authorization. At GitLab we use [Git Hooks](https://git-scm.com/docs/githooks.html#_hooks) for that. So the idea [applied and contributed](https://public-inbox.org/git/1de96b96e3448c8f7e7974f7c082fd08d2d14e96.1592475610.git.ps@pks.im/T/#m9ae42f583968aa1d8ca43bd3007333cf51a618cc) (thanks, [Patrick Steinhardt](/company/team/#pks-gitlab)!) was the same: when events happen with Git, execute a hook and allow Gitaly to execute business logic. Through the exit code of the hook, Git is signaled on how to proceed. In Git, these events are updates of any reference (for example, branches or tags). When this happens Git will then allow Gitaly to participate in a [three-phase commit](https://en.wikipedia.org/wiki/Three-phase_commit_protocol) transaction by communicating back to Praefect, and enforce consistency. So we got that released in Git, fixed a bug, and now we’re [rolling it out to almost all write requests](https://gitlab.com/gitlab-org/git/-/issues/79).\n\n## A defensible cost increase\n\nNow strong consistency is great, but we are effectively asking our customers, “Instead of one copy, why don't you triple your storage costs and your server costs and whatnot, and you have zero benefits unless something goes wrong.” That wasn't really appealing for most customers, but now we’ve sweetened the deal with increased performance and making the cost increase more manageable. \n\nSo, if you have three copies of your data that are up to date, then all of them could serve any request that doesn't mutate the data, right? Because you know they're up to date. Right now, [Pavlo](/company/team/?department=gitaly-team#8bitlife) is working on [read distribution, which we are making generally available in 13.8](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/71960) (coming Jan. 22, 2021). 
[We rolled it out briefly before](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/58694), but it didn’t scale as expected, so we’ve worked with QA to mitigate that.\n\nRight now, Praefect is rolled out to a very limited subset of projects on GitLab.com, because running it is expensive already. When I first proposed rolling it out for everyone, it was very quick to calculate that that would triple the cost of our Gitaly Clusters – not within the budget at all! So we're trying to iterate towards that goal. The first step is to work on allowing a [variable replication factor](https://docs.gitlab.com/ee/administration/gitaly/praefect.html#variable-replication-factor). It can be expensive to store a lot of data multiple times, so why don't we make it so that you can store some repositories three times and some just once; the single-copy repositories simply don't get the guarantees and availability of those with three copies.\n\n## Challenges and lessons learned\n\nSo we have Praefect, this new component, but it's not installed by default on GitLab Omnibus –\nyou have to enable it yourself. The [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit) uses it, as do the tests on GitLab.com for GitLab projects, but that wasn’t always the case. When you have an optional part in your architecture, if you’re debugging or talking with customers, there is the additional mental burden of verifying what the architecture looks like. When the component is always there, you can make much quicker assumptions about what's going on and why it is or isn't working. Officially, we have deprecated NFS, so it makes sense to make Praefect a required component that we can depend on being there.\n\nAlso, as we add more features to Praefect, if it’s still optional then some customers get those added benefits and some don’t.\n\n### We should have put it in production sooner\n\nOur first iteration was just proxying the traffic, doing nothing with it, and verifying that it works. We didn't put it in production because it offered nothing to the community. But it introduced new components into the architecture, which our SREs need to know about, and there were a couple of bugs we only found much later. I was hesitant to put something in production that didn't offer anything in return, but if we’d been a little more aggressive with putting it out there – even just for a small subset of projects – we would understand more quickly what we're running, what was working, and what wasn't. \n\n### Applying big architectural changes takes time\n\nIf you ask customers to make giant architectural changes, it's going to take longer than you think. When we released Praefect and Gitaly Clusters in 13.0, it was fairly rough around the edges and some things weren't working as you would expect, but it was a good time to release because now, six months later, we see customers finally starting to implement it. They want to validate, try it out on a subset, and then finally roll it out for their whole GitLab instance. While that took longer than I expected, it's cool to see the numbers going up now, and adoption is growing quite rapidly.\n\n## More than just a traffic manager\n\nPraefect does much more than just inspect the traffic. If Gitaly goes down, ideally you want to notice that before you actually fire a request, which Praefect does. It does failover, so if a node fails and it was designated as the primary, then it fails over to a secondary, which is then designated as the primary. 
\n\nI'm really excited for the next few years and the kind of things we are planning to build in Praefect and what that will deliver to GitLab.com and our customers and community. Where before we didn’t have very granular control over what we were doing or why we were doing it, now we can intercept and optimize.\n\n## What's next\n\nWe're shipping [HA Distributed Reads](https://gitlab.com/gitlab-org/gitaly/-/issues/3334) in GitLab 13.8 (Jan. 22, 2021). For 13.9, we're shooting for [strong consistency in the Gitaly Cluster](https://gitlab.com/groups/gitlab-org/-/epics/1189) and [variable replication factor](https://gitlab.com/groups/gitlab-org/-/epics/3372).\n\nFor GitLab self-managed users, consider enabling Praefect if you have high availability requirements. Visit our [Gitaly Clusters documentation](https://docs.gitlab.com/ee/administration/gitaly/praefect.html) to get started.\n\n_Major thanks to [Rebecca Dodd](/company/team#rebecca) who contributed to this post._\n\nCover image by [Yoel J Gonzalez](https://unsplash.com/@yoeljgonzalez?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText\") on [Unsplash](https://unsplash.com/s/photos/traffic?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText)\n{: .note}\n",[1328,702,915,232,703],{"slug":4481,"featured":6,"template":678},"high-availability-git-storage-with-praefect","content:en-us:blog:high-availability-git-storage-with-praefect.yml","High Availability Git Storage With Praefect","en-us/blog/high-availability-git-storage-with-praefect.yml","en-us/blog/high-availability-git-storage-with-praefect",{"_path":4487,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4488,"content":4493,"config":4499,"_id":4501,"_type":16,"title":4502,"_source":17,"_file":4503,"_stem":4504,"_extension":20},"/en-us/blog/using-run-parallel-jobs",{"title":4489,"description":4490,"ogTitle":4489,"ogDescription":4490,"noIndex":6,"ogImage":4351,"ogUrl":4491,"ogSiteName":692,"ogType":693,"canonicalUrls":4491,"schema":4492},"How we used parallel CI/CD jobs to increase our productivity","GitLab uses parallel jobs to help long-running jobs run faster.","https://about.gitlab.com/blog/using-run-parallel-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used parallel CI/CD jobs to increase our productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Miguel Rincon\"}],\n        \"datePublished\": \"2021-01-20\",\n      }",{"title":4489,"description":4490,"authors":4494,"heroImage":4351,"date":4496,"body":4497,"category":14,"tags":4498},[4495],"Miguel Rincon","2021-01-20","\n\nAt GitLab, we must verify simultaneous changes from the hundreds of people that contribute to GitLab each day. How can we help them contribute efficiently using our pipelines?\n\nThe pipelines that we use to build and verify GitLab have more than 90 jobs. Not all of those jobs are equal. Some are simple tasks that take a few seconds to finish, while others are long-running processes that must be optimized carefully.\n\nAt the time of this writing, we have more than 700 [pipelines running](https://gitlab.com/gitlab-org/gitlab/-/pipelines?page=1&scope=all&status=running). Each of these pipelines represent changes from team members and contributors from the wider community. All GitLab contributors must wait for the pipelines to finish to make sure the change works and integrates with the rest of the product. 
We want our pipelines to finish as fast as possible to maintain the productivity of our teams.\n\nThis is why we constantly monitor the duration of our pipelines. For example, in December 2020, successful merge request pipelines had a duration of [53.8 minutes](/handbook/engineering/quality/performance-indicators/#average-merge-request-pipeline-duration-for-gitlab):\n\n![Average pipeline duration was 53.8 minutes in December](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/historical-pipeline-duration.png){: .shadow.medium.center}\nThe average pipeline took 53.8 minutes to finish in December.\n{: .note.text-center}\n\nGiven that we run [around 500 merge request pipelines](https://gitlab.com/gitlab-org/gitlab/-/pipelines/charts) per day, we want to know: Can we optimize how our long-running jobs _run_?\n\n## How we fixed our bottleneck jobs by making them run in parallel\n\nThe `frontend-fixtures` job uses `rspec` to generate mock data, which is then saved in files called \"fixtures\". These files are loaded by our frontend tests, so the `frontend-fixtures` job must finish before any of our frontend tests can start.\n\n> As not all of our tests need these frontend fixtures, many jobs use the [`needs` keyword](https://docs.gitlab.com/ee/ci/yaml/#needs) to start before the `frontend-fixtures` job is done.\n\nIn our pipelines, this job looked like this:\n\n![The `frontend-fixtures` job](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job.png){: .shadow.medium.center}\nInside the frontend fixtures job.\n{: .note.text-center}\n\nThis job typically took 20 minutes, and each individual fixture could be generated independently, so we knew there was an opportunity to run this process in parallel.\n\nThe next step was to configure our pipeline to split the job into multiple batches that could be run in parallel.\n\n## How to make frontend-fixtures a parallel job\n\nFortunately, GitLab CI provides an easy way to run a job in parallel using the [`parallel` keyword](https://docs.gitlab.com/ee/ci/yaml/#parallel). In the background, this creates \"clones\" of the same job, so that multiple copies of it can run simultaneously.\n\n**Before:**\n\n```yml\nfrontend-fixtures:\n  extends:\n    - .frontend-fixtures-base\n    - .frontend:rules:default-frontend-jobs\n```\n\n**After:**\n\n```yml\nrspec-ee frontend_fixture:\n  extends:\n    - .frontend-fixtures-base\n    - .frontend:rules:default-frontend-jobs\n  parallel: 2\n```\n
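\nBehind the scenes, each generated copy of the job receives the predefined `CI_NODE_INDEX` and `CI_NODE_TOTAL` variables, which the job script (or a tool like Knapsack) can use to pick its slice of the work. A minimal sketch with a hypothetical job, unrelated to our actual configuration:\n\n```yml\nshow-slice:\n  parallel: 2\n  script:\n    # Prints \"slice 1 of 2\" in the first generated job and \"slice 2 of 2\" in the second\n    - echo \"slice $CI_NODE_INDEX of $CI_NODE_TOTAL\"\n```\n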
You will notice two changes. First, we changed the name of the job so that it is picked up by [Knapsack](https://docs.knapsackpro.com/ruby/knapsack) (more on that later), and then we added the `parallel` keyword, so the job is duplicated and runs in parallel.\n\nThe new jobs that are generated look like this:\n\n![Our fixtures job running in parallel](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job-parallel.png){: .shadow.medium.center}\nThe new jobs that are picked up by Knapsack and run in parallel.\n{: .note.text-center}\n\nAs we used a value of `parallel: 2`, two jobs are generated with the names:\n\n- `rspec-ee frontend_fixture 1/2`\n- `rspec-ee frontend_fixture 2/2`\n\nOur two \"generated\" jobs now take 3 and 17 minutes, respectively, giving us an overall decrease of about three minutes.\n\n![Two parallel jobs in the pipeline](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job-detail.png){: .shadow.medium.center}\nThe parallel jobs that are running in the pipeline.\n{: .note.text-center}\n\n## Another way we optimized the process\n\nAs we use Knapsack to distribute the test files among the parallel jobs, we were able to make more improvements by reducing the time it takes our longest-running fixtures-generator file to run.\n\nWe did this by splitting the file into smaller batches and optimizing it, so we have more tests running in parallel, which shaved off an additional [~3.5 minutes](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47158#note_460372560).\n
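\nTying the two pieces together: a Knapsack-backed parallel RSpec job can be as small as the sketch below (not our actual configuration). Knapsack reads `CI_NODE_INDEX` and `CI_NODE_TOTAL` and assigns each generated job its share of the spec files, based on a report of previous test timings:\n\n```yml\nrspec-parallel:\n  parallel: 2\n  script:\n    # Each of the two generated jobs runs only its own share of the specs\n    - bundle exec rake knapsack:rspec\n```\n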
## Tips for running parallel jobs\n\nIf you want to ramp up your productivity, you can leverage `parallel` in your pipelines by following these tips:\n\n1. Measure the time your pipelines take to run and identify possible bottleneck jobs. You can do this by checking which jobs are slower than others.\n1. Once your slow jobs are identified, try to figure out if they can be run independently from each other or in batches.\n   - Automated tests are usually good candidates, as they tend to be self-contained and run in parallel anyway.\n1. Add the `parallel` keyword while measuring the outcome over the next few pipeline runs.\n\n## Learn more about our solution\n\nOn GitLab Unfiltered, we discuss how running jobs in parallel improved the speed of our pipelines.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/hKsVH_ZhSAk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd here are links to some of the resources we used to run pipelines in parallel:\n\n- The [merge request that introduced `parallel` to fixtures](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46959).\n- An important [optimization follow-up](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47158) to make one of the slow tests faster.\n- The [Knapsack gem](https://docs.knapsackpro.com/ruby/knapsack), which we leverage to split the tests more evenly in multiple CI nodes.\n\nAnd many thanks to [Rémy Coutable](/company/team/#rymai), who helped me implement this improvement.\n\nCover image by [@dustt](https://unsplash.com/@dustt) on [Unsplash](https://unsplash.com/photos/ZqBNb7xK5s8)\n{: .note}\n",[915,832,937,704,727],{"slug":4500,"featured":6,"template":678},"using-run-parallel-jobs","content:en-us:blog:using-run-parallel-jobs.yml","Using Run Parallel Jobs","en-us/blog/using-run-parallel-jobs.yml","en-us/blog/using-run-parallel-jobs",{"_path":4506,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4507,"content":4513,"config":4520,"_id":4522,"_type":16,"title":4523,"_source":17,"_file":4524,"_stem":4525,"_extension":20},"/en-us/blog/this-sre-attempted-to-roll-out-an-haproxy-change",{"title":4508,"description":4509,"ogTitle":4508,"ogDescription":4509,"noIndex":6,"ogImage":4510,"ogUrl":4511,"ogSiteName":692,"ogType":693,"canonicalUrls":4511,"schema":4512},"This SRE's HAProxy Config Change: An Unexpected Journey","This post is about a wild discovery made while investigating strange behavior from HAProxy. We dive into the pathology, describe how we found it, and share some investigative techniques used along the way.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681844/Blog/Hero%20Images/infra-proxy-protocol-wireshark-header.png","https://about.gitlab.com/blog/this-sre-attempted-to-roll-out-an-haproxy-change","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"This SRE attempted to roll out an HAProxy config change. You won't believe what happened next... \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Igor Wiedler\"}],\n        \"datePublished\": \"2021-01-14\",\n      }",{"title":4514,"description":4509,"authors":4515,"heroImage":4510,"date":4517,"body":4518,"category":14,"tags":4519},"This SRE attempted to roll out an HAProxy config change. You won't believe what happened next...",[4516],"Igor Wiedler","2021-01-14","\n\nThis blog post was originally published on the GitLab Unfiltered blog. 
It was reviewed and republished on 2021-02-12.\n{: .note .alert-info .text-center}\n\n## TL;DR\n\n- HAProxy has a `server-state-file` directive that persists some of its state across restarts.\n- This state file contains the port of each backend server.\n- If an `haproxy.cfg` change modifies the port, the new port will be overwritten with the previous one from the state file.\n- A workaround is to change the backend server name, so that it is considered to be a separate server that does not match what is in the state file.\n- This has implications for the rollout procedure we use on HAProxy.\n\n## Background\n\nAll of this occurred in the context of [the gitlab-pages PROXYv2\nproject](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/11902).\n\nThe rollout to staging involves changing the request flow from TCP proxying...\n```\n                   443                   443                        1443\n[ client ] -> [ google lb ] -> [ fe-pages-01-lb-gstg ] -> [ web-pages-01-sv-gstg ]\n      tcp,tls,http         tcp                        tcp            tcp,tls,http\n```\n\n... to using the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt):\n```\n                   443                   443                        2443\n[ client ] -> [ google lb ] -> [ fe-pages-01-lb-gstg ] -> [ web-pages-01-sv-gstg ]\n      tcp,tls,http         tcp                     proxyv2,tcp       proxyv2,tcp,tls,http\n```\n\nThis is done through this change to `/etc/haproxy/haproxy.cfg` on\n`fe-pages-01-lb-gstg` (note the port change):\n```diff\n-    server web-pages-01-sv-gstg web-pages-01-sv-gstg.c.gitlab-staging-1.internal:1443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080\n-    server web-pages-02-sv-gstg web-pages-02-sv-gstg.c.gitlab-staging-1.internal:1443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080\n+    server web-pages-01-sv-gstg web-pages-01-sv-gstg.c.gitlab-staging-1.internal:2443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080 send-proxy-v2\n+    server web-pages-02-sv-gstg web-pages-02-sv-gstg.c.gitlab-staging-1.internal:2443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080 send-proxy-v2\n```\n\nSeems straightforward enough, let's go ahead and apply that change.\n\n## The brokenness\n\nAfter applying this change on one of the two `fe-pages` nodes, the requests to\nthat node start failing.\n\nBy retrying a few times via `curl` on the command line, we see this error:\n```\n➜  ~ curl -vvv https://jarv.staging.gitlab.io/pages-test/\n*   Trying 35.229.69.78...\n* TCP_NODELAY set\n* Connected to jarv.staging.gitlab.io (35.229.69.78) port 443 (#0)\n* ALPN, offering h2\n* ALPN, offering http/1.1\n* successfully set certificate verify locations:\n*   CAfile: /etc/ssl/cert.pem\n  CApath: none\n* TLSv1.2 (OUT), TLS handshake, Client hello (1):\n* LibreSSL SSL_connect: SSL_ERROR_SYSCALL in connection to jarv.staging.gitlab.io:443\n* Closing connection 0\ncurl: (35) LibreSSL SSL_connect: SSL_ERROR_SYSCALL in connection to jarv.staging.gitlab.io:443\n```\n\nThis looks like some issue in the TLS stack, or possibly with the underlying\nconnection. It turns out that `LibreSSL` does not give us much insight into the\nunderlying issue here.\n\nSo to get a better idea, let's capture a traffic dump on the HAProxy node:\n```\nsudo tcpdump -v -w \"$(pwd)/$(hostname).$(date +%Y%m%d_%H%M%S).pcap\"\n```\n\nWhile `tcpdump` is running, we can generate some traffic, then ctrl+c and pull\nthe dump down for further analysis. 
That `pcap` file can be opened in Wireshark, and this allows the data to be\nexplored and filtered interactively. Here, the first really surprising thing happens:\n\n**We do not see any traffic on port 2443.**\n\nAt the same time, we _do_ see some traffic on port 1443. But we came here to look at what underlies the LibreSSL error, and what we find\nis the following (by filtering for `ip.addr == \u003Cmy external ip>`): a TCP SYN/ACK establishing the connection, followed by the client\nsending a TLS \"hello\", after which the server closes the connection with a FIN.\n\nIn other words, the server is closing the connection on the client.\n\n## The early hypotheses\n\nSo here come the usual suspects:\n\n* Did we modify the correct place in the config file?\n* Did we catch all places we need to update in the config?\n* Did the HAProxy process parse the config successfully?\n* Did HAProxy actually reload?\n* Is there a difference between reload and restart?\n* Did we modify the correct config file?\n* Are there old lingering HAProxy processes on the box?\n* Are we actually sending traffic to this node?\n* Are backend health checks failing?\n* Is there anything in the HAProxy logs?\n\nNone of these gave any insights whatsoever.\n\nIn an effort to reproduce the issue, I ran HAProxy on my local machine with a\nsimilar config, proxying traffic to `web-pages-01-sv-gstg`. To my surprise, this\nworked correctly. I tested with different HAProxy versions. It worked locally, but not on\n`fe-pages-01`.\n\nAt this point I'm stumped. The local config is not identical to gstg, but quite\nsimilar. What could possibly be the difference?\n\n## Digging deeper\n\nThis is when I reached out to [Matt Smiley](/company/team#/msmiley) to help with the investigation.\n\nWe started off by repeating the experiment. We saw the same results:\n\n* Server closes connection after client sends TLS hello\n* No traffic from fe-pages to web-pages on port 2443\n* Traffic from fe-pages to web-pages on port 1443\n\nThe first lead was to look at the packets going to port 1443. What do they\ncontain? We see this:\n\n![Traffic capture in wireshark showing a TCP FIN and the string QUIT in the stream](https://about.gitlab.com/images/blogimages/infra-proxy-protocol-wireshark.png){: .shadow.center}\nTraffic capture in Wireshark showing a TCP FIN and the string QUIT in the stream\n{: .note.text-center}\n\nThere is mention of `jarv.staging.gitlab.io`, which does match what the client sent. And before that there is some really weird preamble:\n\n```\n\"\\r\\n\\r\\n\\0\\r\\nQUIT\\n\"\n```\n\nWhat on earth is this? Is it from the PROXY protocol? Let's search [the\nspec](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) for the word\n\"QUIT.\" Nothing.\n\nIs this something in the HAProxy source? Searching for \"QUIT\" in the code\nreveals some hits, but none that explain this.\n\nSo this is a mystery. We leave it for now, and probe in a different direction.\n\n## Homing in\n\nHow come we are sending traffic to port 1443, when that port is not mentioned in\n`haproxy.cfg`? Where on earth is HAProxy getting that information from?\n\nI suggested running `strace` on HAProxy startup, so that we can see which files\nare being `open`ed. This is a bit tricky to do though, because the process is\nsystemd-managed.\n\nIt turns out that thanks to BPF and [BCC](https://github.com/iovisor/bcc), we\ncan actually listen for open events system-wide using the wonderful\n[opensnoop](https://github.com/iovisor/bcc/blob/master/tools/opensnoop.py). 
So we run `opensnoop` and restart `haproxy`, and this is what we see, highlighting the relevant bit:\n```\niwiedler@fe-pages-01-lb-gstg.c.gitlab-staging-1.internal:~$ sudo /usr/share/bcc/tools/opensnoop  -T --name haproxy\n\n...\n\n24.117171000  16702  haproxy             3   0 /etc/haproxy/haproxy.cfg\n...\n24.118099000  16702  haproxy             4   0 /etc/haproxy/errors/400.http\n...\n24.118333000  16702  haproxy             4   0 /etc/haproxy/cloudflare_ips_v4.lst\n...\n24.119109000  16702  haproxy             3   0 /etc/haproxy/state/global\n```\n\nWhat do we have here? `/etc/haproxy/state/global` seems oddly suspicious.\nWhat could it possibly be? Let's see what the file contains.\n```\niwiedler@fe-pages-01-lb-gstg.c.gitlab-staging-1.internal:~$ sudo cat /etc/haproxy/state/global\n\n1\n# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord\n5 pages_http 1 web-pages-01-sv-gstg 10.224.26.2 2 0 1 1 21134 15 3 4 6 0 0 0 web-pages-01-sv-gstg.c.gitlab-staging-1.internal 1080 -\n5 pages_http 2 web-pages-02-sv-gstg 10.224.26.3 2 0 1 1 20994 15 3 4 6 0 0 0 web-pages-02-sv-gstg.c.gitlab-staging-1.internal 1080 -\n6 pages_https 1 web-pages-01-sv-gstg 10.224.26.2 2 0 1 1 21134 15 3 4 6 0 0 0 web-pages-01-sv-gstg.c.gitlab-staging-1.internal 1443 -\n6 pages_https 2 web-pages-02-sv-gstg 10.224.26.3 2 0 1 1 20994 15 3 4 6 0 0 0 web-pages-02-sv-gstg.c.gitlab-staging-1.internal 1443 -\n```\n\nIt appears we are storing some metadata for each backend server, including its old port number!\n\nNow, looking again in `haproxy.cfg`, we see:\n```\nglobal\n    ...\n    server-state-file /etc/haproxy/state/global\n```\n\nSo we are using the\n[`server-state-file`](https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#server-state-file)\ndirective. This will persist server state across HAProxy restarts. That is\nuseful to keep metadata consistent, such as whether a server was marked as\nMAINT.
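\nFor context, the state file is typically produced just before a reload by dumping the runtime state through HAProxy's admin socket. A sketch of the common pattern (the socket and file paths vary by installation):\n\n```sh\n# Save the running state so the freshly reloaded process can pick it up:\necho \"show servers state\" | socat stdio /run/haproxy/admin.sock \\\n  > /etc/haproxy/state/global\nsystemctl reload haproxy\n```\n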
**However, it appears to be clobbering the port from `haproxy.cfg`!**\n\nThe suspected behavior is:\n\n* HAProxy is running with the old config: `web-pages-01-sv-gstg`, `1443`\n* `haproxy.cfg` is updated with the new config: `web-pages-01-sv-gstg`, `2443`, `send-proxy-v2`\n* HAProxy reload is initiated\n* HAProxy writes out the state to `/etc/haproxy/state/global` (including the old port of each backend server)\n* HAProxy starts up, reads `haproxy.cfg`, initializes itself with the new config: `web-pages-01-sv-gstg`, `2443`, `send-proxy-v2`\n* HAProxy reads the state from `/etc/haproxy/state/global`, matches on the backend server `web-pages-01-sv-gstg`, and overrides all values, including the port!\n\nThe result is that we are now attempting to send PROXYv2 traffic to the TLS port.\n\n## The workaround\n\nTo validate the theory and develop a potential workaround, we modify\n`haproxy.cfg` to use a different backend server name.\n\nThe new diff is:\n```diff\n-    server web-pages-01-sv-gstg         web-pages-01-sv-gstg.c.gitlab-staging-1.internal:1443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080\n-    server web-pages-02-sv-gstg         web-pages-02-sv-gstg.c.gitlab-staging-1.internal:1443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080\n+    server web-pages-01-sv-gstg-proxyv2 web-pages-01-sv-gstg.c.gitlab-staging-1.internal:2443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080 send-proxy-v2\n+    server web-pages-02-sv-gstg-proxyv2 web-pages-02-sv-gstg.c.gitlab-staging-1.internal:2443 check inter 3s fastinter 1s downinter 5s fall 3 port 1080 send-proxy-v2\n```\n\nWith this config change in place, we reload HAProxy and indeed, it is now\nserving traffic correctly. See [the merge request fixing it](https://gitlab.com/gitlab-cookbooks/gitlab-haproxy/-/merge_requests/261).\n\n## A follow-up on those `QUIT` bytes\n\nNow, what is up with that `QUIT` message? Is it part of the PROXY protocol? Remember, searching [the\nspec](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) for that\nstring did not find any matches. However, Matt actually read the spec, and found this section on version 2 of\nthe protocol:\n```\nThe binary header format starts with a constant 12 bytes block containing the\nprotocol signature :\n\n   \\x0D \\x0A \\x0D \\x0A \\x00 \\x0D \\x0A \\x51 \\x55 \\x49 \\x54 \\x0A\n```\n\nThose are indeed the bytes that make up \"\\r\\n\\r\\n\\0\\r\\nQUIT\\n\".
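\nYou can reproduce that dump yourself. A quick sketch:\n\n```sh\nprintf '\\r\\n\\r\\n\\0\\r\\nQUIT\\n' | xxd\n# 00000000: 0d0a 0d0a 000d 0a51 5549 540a            .......QUIT.\n```\n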
Slightly less mnemonic than the header from the text-based version 1 of the protocol:\n```\n- a string identifying the protocol : \"PROXY\" ( \\x50 \\x52 \\x4F \\x58 \\x59 )\n  Seeing this string indicates that this is version 1 of the protocol.\n```\n\nWell, I suppose that explains it.\n\nI believe our work here is done. Don't forget to like and subscribe!\n",[1286,915],{"slug":4521,"featured":6,"template":678},"this-sre-attempted-to-roll-out-an-haproxy-change","content:en-us:blog:this-sre-attempted-to-roll-out-an-haproxy-change.yml","This Sre Attempted To Roll Out An Haproxy Change","en-us/blog/this-sre-attempted-to-roll-out-an-haproxy-change.yml","en-us/blog/this-sre-attempted-to-roll-out-an-haproxy-change",{"_path":4527,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4528,"content":4533,"config":4539,"_id":4541,"_type":16,"title":4542,"_source":17,"_file":4543,"_stem":4544,"_extension":20},"/en-us/blog/learn-gitlab-devops-version-control",{"title":4529,"description":4530,"ogTitle":4529,"ogDescription":4530,"noIndex":6,"ogImage":3175,"ogUrl":4531,"ogSiteName":692,"ogType":693,"canonicalUrls":4531,"schema":4532},"GitLab tutorials for secure pipelines, Kubernetes, and more at Learn@GitLab","Learn@GitLab offers videos and self-driven demos so you can get the most out of GitLab at your own pace.","https://about.gitlab.com/blog/learn-gitlab-devops-version-control","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab tutorials for secure pipelines, Kubernetes, and more at Learn@GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2021-01-12\",\n      }",{"title":4529,"description":4530,"authors":4534,"heroImage":3175,"date":4536,"body":4537,"category":14,"tags":4538},[4535],"Chrissie Buchanan","2021-01-12","\nAt GitLab, we often say that it's not what you know, it's knowing where to look. But sometimes, finding answers isn’t so easy.\n\nAn autonomous, [self-service](/company/culture/all-remote/self-service/#proactive-approach-to-answering-questions), self-learning, and self-searching mindset is when you operate with the idea that your question has already been answered – somewhere. But we realized that for people interested in GitLab, or even those using GitLab, resources on **how** to use it weren’t always easy to find.\n\nWhile we stress the importance of having a [single source of truth](https://handbook.gitlab.com/handbook/values/#single-source-of-truth), we realized that when it came to learning about GitLab, there were almost too many places to look. We have [GitLab University](https://docs.gitlab.com/ee/index.html), our official [GitLab](https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg) and [GitLab Unfiltered](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A) YouTube channels where we regularly upload educational content, and of course, the [docs](https://docs.gitlab.com/). We needed to find a way to consolidate self-education and make it more intuitive.\n\n## What is Learn@GitLab?\n\n[Learn@GitLab](/learn/) is a learning portal where anyone can go to find self-driven demos and videos about using GitLab. Rather than just making Learn@GitLab _one more resource_, we’re iterating on this idea and consolidating our educational content so that it’s self-driven and easy to find.\n\nThe goal for Learn@GitLab is to present high-quality, accessible technical content that is easy to find on our website, helping prospects and users educate themselves about GitLab. This content will include educational technical videos, simulation/click-through demos, and tutorials. 
The content is organized by common topics such as [DevOps Platform](/solutions/devops-platform/), [version control](/topics/version-control/) and collaboration, and continuous integration, to name a few.\n\nWe’ve picked three of our favorite videos/tutorials for you to get a quick introduction to Learn@GitLab.\n\n## The benefits of a single DevOps platform\n\nWhen we talk about the benefits of GitLab, we often talk about how it saves time and how the single application reduces toolchain complexity. But what does that mean in the context of an ordinary toolchain using tools like GitHub, Jenkins, Jira, etc.?\n\nIn this super short video, we break down a typical toolchain according to three criteria: integrations needed, clicks, and screen switches. How many times do you need to context switch for a simple task? We break it down for you.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/MNxkyLrA5Aw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Adding security to your GitLab CI/CD pipeline\n\nGitLab helps teams go from DevOps to DevSecOps. One of the ways we help is by allowing you to check your application for security vulnerabilities in your CI/CD pipelines that may lead to unauthorized access, data leaks, denial of service, or worse. GitLab reports these vulnerabilities in the merge request so you can fix them before they ever reach end users.\n\nThis quick video guides you through setting up and configuring GitLab security features, and setting up approval rules for merge requests.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Fd5DhebtScg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab developer flow with Kubernetes\n\nIf you’re a developer, or even just managing a team of developers, you might want to see what a typical workflow would be like using GitLab. If you’re using [Kubernetes](/solutions/kubernetes/), seeing how GitLab works within a deployment environment is especially important.\n\nIn this technical demo, we use Amazon EKS as the deployment environment. We go over creating GitLab issues and merge requests, using Auto DevOps pipeline templates, review apps, and advanced deployment techniques, and rolling out to staging and production – all in **just 15 minutes.**\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/TMQziI2VDbQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWhile we’ll continue to have educational content in other places on our site (and will continue to update them), Learn@GitLab will act as a front door for self-education that is no more than two clicks from our homepage. With this new learning portal, we hope to teach people what problems GitLab can solve, but more importantly, show step-by-step _how_ GitLab solves them.\n\nFeel free to explore the different learning paths and comment below if you have any suggestions. 
Everyone can contribute.\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\n[Go to Learn@GitLab](/learn/)!\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n\nCover image by [Benjamin Davies](https://unsplash.com/@bendavisual?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/learn?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[915,232,726],{"slug":4540,"featured":6,"template":678},"learn-gitlab-devops-version-control","content:en-us:blog:learn-gitlab-devops-version-control.yml","Learn Gitlab Devops Version Control","en-us/blog/learn-gitlab-devops-version-control.yml","en-us/blog/learn-gitlab-devops-version-control",{"_path":4546,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4547,"content":4553,"config":4558,"_id":4560,"_type":16,"title":4561,"_source":17,"_file":4562,"_stem":4563,"_extension":20},"/en-us/blog/top-engineering-stories-gitlab",{"title":4548,"description":4549,"ogTitle":4548,"ogDescription":4549,"noIndex":6,"ogImage":4550,"ogUrl":4551,"ogSiteName":692,"ogType":693,"canonicalUrls":4551,"schema":4552},"These are your favorite GitLab engineering stories","From building a Web IDE, to our migration to GCP, to tracking down a bug in NFS – these are some of our most popular engineering blog posts.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681835/Blog/Hero%20Images/stairs_iteration.jpg","https://about.gitlab.com/blog/top-engineering-stories-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"These are your favorite GitLab engineering stories\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-01-07\",\n      }",{"title":4548,"description":4549,"authors":4554,"heroImage":4550,"date":4555,"body":4556,"category":14,"tags":4557},[3676],"2021-01-07","\n\nSome of our most popular and enduring engineering stories show how we use GitLab technology to take small steps to achieve major upgrades, fixes, and integrations to improve upon GitLab features. These stories demonstrate one of our core values at GitLab, [iteration](https://handbook.gitlab.com/handbook/values/#iteration) – meaning we ship the smallest changes first. When it comes to building new features or introducing fixes at GitLab, our engineering team operates under the principle that incremental change drives the greatest value.\n\n## How we executed on milestone migrations\n\n### Azure to GCP\n\nAzure simply was not cutting it for hosting GitLab.com, and we decided it was time to migrate GitLab over to Google Cloud Platform (GCP). This was no small decision or endeavor, and we documented our end-to-end process publicly in the hopes that other companies might learn from our experience. 
[Read the blog post describing the migration to GCP](/blog/gitlab-journey-from-azure-to-gcp/), or watch the video below to learn more about this major migration.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Ve_9mbJHPXQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nNext, we explain how we analyzed data to see [how GitLab.com was performing on GCP after this major migration](/blog/gitlab-com-stability-post-gcp-migration/). Turns out, GitLab.com availability improved by 61% post-migration.\n\n### Upgrading PostgreSQL\n\nIn another blog post, we recount how one of GitLab.com’s main PostgreSQL clusters needed a major version upgrade. We knew it wouldn’t be easy, but in May 2020, we pulled off a [near-perfect execution of this substantial upgrade](/blog/gitlab-pg-upgrade/). We explain how the process unfolded, from planning to testing to full automation.\n\n### Moving to Kubernetes\n\n[Migrating GitLab.com over to Kubernetes](/blog/year-of-kubernetes/) was a painstaking and complex process. In one of our most popular blog posts last year, we share the trials and triumphs from the year after the migration.\n\n## Code detectives show their debugging work\n\nGitLab engineering fellow [Stan Hu](/company/team/#stanhu) explains [how debugging a bug in the Docker client library](/blog/tracking-down-missing-tcp-keepalives/) that was used in the GitLab runner taught him more about Docker, Golang, and even GitLab.\n\nBack in 2018, a customer flagged an NFS bug that the Support team escalated to Stan and his fellow engineers. It took _two weeks_ to hunt down the NFS bug in the Linux kernel, and [Stan chronicles the intricacies of his investigation in this blog post](/blog/how-we-spent-two-weeks-hunting-an-nfs-bug/).\n\nAfter GitLab.com users reported getting the same, mysterious error message, our Scalability team rolled up their sleeves to figure out the origins of the message – and uncovered a complex problem.\n\n![Graph showing connection errors is part of the GitLab Scalability team's troubleshooting efforts](https://about.gitlab.com/images/blogimages/connectionerrorsgraph.png){: .shadow}\nGraph showing connection errors, grouped by second-of-the-minute, indicates a lot of clustering going on in the time dimension.\n{: .note .text-center}\n\nThere were [six key lessons we learned while debugging this scaling problem on GitLab.com](/blog/tyranny-of-the-clock/).\n\n## Using data for anomaly detection\n\nTwo years ago we switched over from our legacy NFS file-sharing service to Gitaly, and soon we noticed that our Gitaly service was lagging.\n\n![Graph showing lagging problems with Gitaly service](https://about.gitlab.com/images/blogimages/graph-01.png){: .shadow}\nWe noticed that the 99th percentile performance of the gRPC endpoint for the Gitaly service had degraded for an unknown reason, with latency climbing from 100ms to 400ms.\n{: .note .text-center}\n\nThrough solid application monitoring, we were able to identify the problem and quickly fix it. [Unpack the process behind the Gitaly fix in this popular blog post](/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/).\n\nPrometheus reports on time-series data, which can be used for anomaly detection and alerting. 
[Learn how you can use this data to set up analysis and alerting with Prometheus](/blog/anomaly-detection-using-prometheus/) and use the code snippets to try it out in your own system.\n\n## Inside GitLab\n\nGitLab co-founder Dmitriy Zaporozhets built GitLab on Ruby on Rails, despite working mostly in PHP at the time. In this foundational blog post, our GitLab CEO, [Sid Sijbrandij](/company/team/#sytses), explains [why building on Rails was the best decision for GitLab](/blog/why-we-use-rails-to-build-gitlab/).\n\nWe built our Web IDE to make it easier to edit code using GitLab. Explore [how we took the GitLab Web IDE from an experiment to a working feature](/blog/introducing-gitlab-s-integrated-development-environment/).\n\n## The extensions and integrations that power us\n\n### How we built a VS Code extension\n\nAfter a survey revealed that VS Code was the most-used tool by our Frontend team, we decided to build a VS Code extension that works with GitLab. Learn [how we built the VS Code extension](/blog/gitlab-vscode-extension/) in a series of iterations.\n\nSoon, we found out our VS Code extension was very popular. So we wrote a blog post explaining [how users can develop their own extensions with VS Code and GitLab](/blog/vscode-extension-development-with-gitlab/).\n\n### Challenges with Elasticsearch\n\nElasticsearch enables global code search on GitLab.com and would allow us to run advanced syntax search and advanced global search of our codebase. But we ran into trouble with GitLab’s integration with Elasticsearch and [hit some dead ends on our first attempt to initiate the integration](/blog/enabling-global-search-elasticsearch-gitlab-com/). We recalibrated, learned from our mistakes, and [made a second attempt at the integration](/blog/elasticsearch-update/) a few months later.\n\n### Dogfooding at GitLab\n\nThe engineering productivity team at GitLab built Insights to examine trends in the GitLab.com issue tracker at a high level, but soon realized Insights could be useful to our GitLab Ultimate users. Watch the video below or [read the blog post to explore the origins of Insights](/blog/insights/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/kKnQzS9qorc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### How we reimagined the technical interview\n\nThe trouble with technical interviews is that they rarely reflect the job you’re interviewing for. Learn how former GitLab team member, Clement Ho, [reimagined the technical interview for Frontend engineers](/blog/the-trouble-with-technical-interviews/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/dNABW84sTzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## How troubleshooting and security modeling can prevent disaster\n\nIn a major feat of coordination, our globally distributed engineering team managed to work synchronously to troubleshoot an issue with our HashiCorp Consul cluster, successfully avoiding any significant problems, including the outage we anticipated. Read \"[The consul outage that never happened](/blog/the-consul-outage-that-never-happened/)\" to learn how they did it.\n\nOur Red team at GitLab is continually searching for vulnerabilities, big and small, and introduces patches to fix them. 
In one of our most popular 2020 posts, [our security team explains how an attacker who already gained unauthorized access to the cloud platform might be able to take advantage of GCP privileges](/blog/plundering-gcp-escalating-privileges-in-google-cloud-platform/), and how replicating this breach scenario could help you prevent this from happening on your GCP instance.\n\n**Did we miss something?** Share a link to your favorite GitLab engineering story below and [check out our round-up of some of our top stories about how to apply GitLab technology](/blog/gitlab-for-cicd-agile-gitops-cloudnative/).\n\nCover image by [Jamie Saw](https://unsplash.com/@jsclick?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/series-of-stairs?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[915,232],{"slug":4559,"featured":6,"template":678},"top-engineering-stories-gitlab","content:en-us:blog:top-engineering-stories-gitlab.yml","Top Engineering Stories Gitlab","en-us/blog/top-engineering-stories-gitlab.yml","en-us/blog/top-engineering-stories-gitlab",{"_path":4565,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4566,"content":4572,"config":4578,"_id":4580,"_type":16,"title":4581,"_source":17,"_file":4582,"_stem":4583,"_extension":20},"/en-us/blog/how-we-prevented-security-fixes-leaking-into-our-public-repositories",{"title":4567,"description":4568,"ogTitle":4567,"ogDescription":4568,"noIndex":6,"ogImage":4569,"ogUrl":4570,"ogSiteName":692,"ogType":693,"canonicalUrls":4570,"schema":4571},"How we prevented security fixes leaking into our public repositories","Working in the open makes it difficult to work on security vulnerabilities before they're disclosed, especially when that openness discloses them early!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667227/Blog/Hero%20Images/security-leaks-unlocked.jpg","https://about.gitlab.com/blog/how-we-prevented-security-fixes-leaking-into-our-public-repositories","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we prevented security fixes leaking into our public repositories\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Speicher\"}],\n        \"datePublished\": \"2021-01-04\",\n      }",{"title":4567,"description":4568,"authors":4573,"heroImage":4569,"date":4575,"body":4576,"category":14,"tags":4577},[4574],"Robert Speicher","2021-01-04","One of GitLab's core values is \"[public by default][],\" which means we develop in\nthe open whenever possible. One notable exception to this is security fixes,\nbecause developing security fixes in public discloses vulnerabilities before a\nfix is available, exposing ourselves and our users to attacks.\n\nIn order to work on these security issues in private, public GitLab projects\nhave a security mirror that's accessible only to GitLab engineers. 
A design flaw in GitLab's mirroring feature would cause commits from the\nSecurity repository to be exposed in the public repository before they were\nintended for release.\n\nIn this post we'll describe what the problem was and how we finally resolved it.\n\n[public by default]: https://handbook.gitlab.com/handbook/values/#public-by-default\n\n## Mirroring setup\n\nTo ensure that developers working on a security fix are working against the\nlatest code for a project, we utilize GitLab's [push mirror](https://docs.gitlab.com/ee/user/project/repository/mirror/index.html) feature to mirror\nthe public (\"Canonical\") repository to its private Security fork.\n\nOn every commit to the Canonical repository, the Security repository receives\nthe same commit. All of the mirroring is performed by the [Gitaly][gitaly]\nserver, which handles all of the Git calls made by GitLab.\n\nIn order to know which Git objects in the source are missing on the destination,\nGitLab would [fetch the remote][] and then tell Gitaly to perform the push that\nwould bring the two in sync, which is where the trouble starts.\n\nBy performing a fetch, _every Git object in the Security repository was now\nknown and stored on-disk by the Canonical repository_. If someone knew the SHA\nof a commit in the _private_ repository that contained a security fix, they\ncould view it in the _public_ repository and discover the vulnerability we were\nfixing before it had been publicly disclosed.\n\n[push mirror]: https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html\n[gitaly]: https://gitlab.com/gitlab-org/gitaly\n[fetch the remote]: https://gitlab.com/gitlab-org/gitlab/blob/f5bfe5603137b8f9cf60a2db759db3dbe5c60727/app/services/projects/update_remote_mirror_service.rb#L30\n\n## No guessing necessary\n\nThankfully, even a truncated Git commit SHA is difficult to guess, so at first\nglance this might not look like a high-severity issue.\n\nHowever, the [GitLab help page](https://gitlab.com/help) shows exactly which\ncommit is currently running, and we always deploy security fixes to GitLab.com\nfor verification and to protect our users against the latest threats. Here's\nwhat that might look like:\n\n> ### GitLab Enterprise Edition 13.7.0-pre [690e4bbfe94][]\n\nWhen a security release was in progress, any logged-in user could click on the\nrunning commit SHA and view the entire [source code](/solutions/source-code-management/) tree at that point, security\nfixes included!\n\n[690e4bbfe94]: https://gitlab.com/gitlab-org/gitlab/-/commits/690e4bbfe94\n\n## Experimenting with a fix\n\nThe mirroring setup was a crucial part of our development and release process,\nand the existing fetch-based behavior was itself a crucial piece of what made\nthe mirroring functionality work. During our initial investigation, there was no\nobvious fix. One proposed workaround was to simply remove the SHA from the Help\npage, but that would only hide the problem and \"security through obscurity\"\nisn't really security at all.\n\nAnother workaround, which we [ended up implementing][mirror pause], was to\npause the mirroring as soon as a security fix was merged, and re-enable it\nonce the security release was published. This prevented the leak because the\nfetch was no longer happening, but it would \"stop the world\" while we worked\non a security release. 
The Security mirror quickly fell behind public\ndevelopment, which created a risk of new features causing merge conflicts\nwith the security fixes, or vice versa.\n\nStaff engineer [Jacob Vosmaer][], who began the Gitaly project within GitLab,\n[pointed out][] that, strangely, we only used this fetch-based behavior for\nbranches; tags used Git's low-level [`ls-remote` command][ls-remote].\n\nWhereas Git's `fetch` command creates a local copy of every object from the\nremote repository, the `ls-remote` command only prints the remote's available\nreferences to the terminal. If we used `ls-remote` for branches like we did for tags, the commits from\nthe mirror would no longer be persisted on-disk, and thus wouldn't be\navailable in the public repository.\n\nBecause push mirroring is such a critical part of our own workflow as well as\nour users', we didn't want to just make the change and hope for the best. We\n[set up an experiment][], where the old functionality stayed exactly as it was,\nbut when a [feature flag][] was enabled, we'd also gather the same commit\ninformation using `ls-remote`, and compare the new results to the original,\nlogging any differences.\n\nThe experiment ran on GitLab.com for about a month without major discrepancies.\nIt looked like we had a solution!\n\n[mirror pause]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/626\n[Jacob Vosmaer]: /company/team/#jacobvosmaer-gitlab\n[pointed out]: https://gitlab.com/gitlab-org/gitlab/-/issues/38386#note_312363006\n[ls-remote]: https://git-scm.com/docs/git-ls-remote.html\n[set up an experiment]: https://gitlab.com/gitlab-org/gitaly/-/issues/2670\n[feature flag]: https://docs.gitlab.com/ee/operations/feature_flags.html\n\n## Iterating on the experiment\n\nConsidering the experiment a success, but still being wary of breaking a key\npiece of functionality, we proceeded with caution. Rather than replacing the old\nbehavior outright with the new, we [split the two paths based on a feature\nflag][split].\n\nWhen the flag was disabled, the old, tried-and-true behavior would be used. With\nthe flag enabled, we'd use the new. We shipped this change and left the flag\nenabled, watching for errors.\n\nAfter two weeks without any reported mirroring errors, and with the security\nleak no longer occurring, we were satisfied we had found our fix.\n\nFirst, we shipped a self-managed release [with the feature flag enabled by\ndefault][flag enabled], to ensure that if something unexpectedly broke for those\ninstallations it would be easy to revert to the previous behavior. Finally, after no errors were reported from self-managed users, we [removed the\nfeature flag along with the old behavior][flag removal], and closed out the\nconfidential issue.\n\n[split]: https://gitlab.com/gitlab-org/gitaly/-/merge_requests/2183\n[flag enabled]: https://gitlab.com/gitlab-org/gitaly/-/merge_requests/2330\n[flag removal]: https://gitlab.com/gitlab-org/gitaly/-/merge_requests/2417\n\n## An annoying bug emerges\n\nShortly after making the new behavior the default, we started getting\n[complaints from team members][complaints]. 
They'd receive an automated email\ntelling them that a push mirror was broken, only to go check on the mirror and\nbe told everything was fine.\n\nThis went on for about two months due to the transient nature of the errors.\nEvery time we'd get an email and check to see if it was accurate, the mirroring\nreported everything was fine.\n\nAs we began to implement [a new piece of tooling][new tooling] that depended on\naccurate status reporting from push mirroring, the problem became bigger than a\nfew annoying, seemingly inaccurate emails; it was causing our tooling to behave\nerratically as well.\n\nBecause we had absolutely no idea what was happening or why, our first step was\nto [add logging][] when Gitaly was encountering an error that would mark the\nmirror as failed. The logging [revealed a weird anomaly][anomaly] where the\nSecurity repository – the one _receiving_ updates – appeared\nto be _ahead_ of its source:\n\n```\nI, [2020-09-21T10:10:31] Divergent ref due to ancestry -- remote:f73bb2388a6, local:59812e04368\nI, [2020-09-21T10:26:39] Divergent ref due to ancestry -- remote:8ddcb3333da, local:f73bb2388a6\n```\n\nIn this pair, the first message is saying that the remote – the Security\nrepository – was showing its latest commit as `f73bb2388a6`, and that it wasn't\nan ancestor of the local `59812e04368` commit, causing the error message. On the\nnext run, we see that the local repository has \"caught up\" to the Security\nremote from the prior run.\n\nIt turned out that due to the number of branches and tags in this repository,\nthe `ls-remote` command was taking so long to complete that by the time the data\nwas returned, the local repository was updated by a new push.\n\nBecause we gathered the remote refs after the local ones, a network delay\ncreated a window for new local commits to be written, invalidating our list\nof local refs. Luckily there was a nice [boring solution][]: all we had to do\nwas [swap the order][] in which we gather references.\n\n[complaints]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/914\n[new tooling]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/1111\n[add logging]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/914#note_413855603\n[anomaly]: https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/914#note_416246505\n[boring solution]: https://handbook.gitlab.com/handbook/values/#boring-solutions\n[swap the order]: https://gitlab.com/gitlab-org/gitaly/-/merge_requests/2606\n\n## Wrapping up\n\nAs soon as we swapped the order for gathering references, the transient errors\nwent away and we finally got to close this long-standing issue. 
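\n\nIf you want to see the underlying difference for yourself, here is a minimal sketch contrasting the two commands (the repository URL is a stand-in, not our actual mirror):\n\n```\n# fetch: downloads every reachable object and stores it in the local\n# object database -- afterwards, a \"secret\" commit is viewable locally\n# by anyone who knows its SHA\ngit fetch https://gitlab.example.com/security-fork.git '+refs/heads/*:refs/remotes/security/*'\n\n# ls-remote: only *lists* the remote's refs on stdout; nothing is\n# written to the local object database\ngit ls-remote https://gitlab.example.com/security-fork.git 'refs/heads/*'\n```\n\n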
We were pleased\nwith how we were able to modify such a critical piece of functionality safely\nand without any negative user impact.\n\n## Related issues\n\n- [Security commits available on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/426)\n- [Do not expose GitLab version on GitLab.com](https://gitlab.com/gitlab-org/gitlab/-/issues/38386)\n- [Populate remote branches in-memory via `ls-remote` rather than using `fetch`](https://gitlab.com/gitlab-org/gitaly/-/issues/2670)\n- [Transient push mirror divergence errors](https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/914)\n\nPhoto by [iMattSmart](https://unsplash.com/@imattsmart?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/broken-lock?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText)\n{: .note}\n",[915,1307],{"slug":4579,"featured":6,"template":678},"how-we-prevented-security-fixes-leaking-into-our-public-repositories","content:en-us:blog:how-we-prevented-security-fixes-leaking-into-our-public-repositories.yml","How We Prevented Security Fixes Leaking Into Our Public Repositories","en-us/blog/how-we-prevented-security-fixes-leaking-into-our-public-repositories.yml","en-us/blog/how-we-prevented-security-fixes-leaking-into-our-public-repositories",{"_path":4585,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4586,"content":4592,"config":4597,"_id":4599,"_type":16,"title":4600,"_source":17,"_file":4601,"_stem":4602,"_extension":20},"/en-us/blog/cd-solution-overview",{"title":4587,"description":4588,"ogTitle":4587,"ogDescription":4588,"noIndex":6,"ogImage":4589,"ogUrl":4590,"ogSiteName":692,"ogType":693,"canonicalUrls":4590,"schema":4591},"How to use GitLab tools for continuous delivery","Learn how to use GitLab technology to release software faster and with less risk.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682956/Blog/Hero%20Images/CD-continuous-nature-cover-880x586.jpg","https://about.gitlab.com/blog/cd-solution-overview","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab tools for continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-12-17\",\n      }",{"title":4587,"description":4588,"authors":4593,"heroImage":4589,"date":4594,"body":4595,"category":14,"tags":4596},[1101],"2020-12-17","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-04-01.\n\nEach organization is unique in how they adopt continuous delivery (CD) principles, but the journey to modernize and enhance your software release process can be conducted in phases. In this blog post, we unpack some of the tools companies can use to adopt continuous delivery (CD), and explain how companies can reach continuous delivery in three key stages. The good news is, regardless of how you get there, GitLab offers a solution that allows companies to modernize their release process at their own pace and in their own way.\n\n## Consolidate disparate tools into a single platform\n\nThe first step to reaching [continuous delivery](/topics/continuous-delivery/) is to consolidate the number of disparate tools in your pipeline by using the tools and capabilities baked into the GitLab product. 
In this section, we summarize some of the fundamental components of GitLab and give examples of how they work.\n\nGitLab users can track issues and merge requests using [milestones](https://docs.gitlab.com/ee/user/project/milestones/#milestones), which also help with setting time-bound goals. Milestones can be used as Agile sprints and releases, and allow you to organize issues and merge requests into one group, with an optional start date and an optional due date.\n\n![Example of GitLab milestone from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/milestone.png)\nScreenshot shows example milestone in GitLab.\n\n[Issues are a fundamental tool in GitLab](https://docs.gitlab.com/ee/user/project/issues/#issues), and include many components to help users communicate information about product problems, new features, and more.\n\n[Merge requests (MRs) are created to merge one branch into another](https://docs.gitlab.com/ee/user/project/merge_requests/). MRs are also where solutions are developed and are a key input to the release planning process.\n\nBoth issues and MRs are core components of a release and allow for the audit and tracking of application changes created by a large group of DevOps engineers, system administrators, and developers. We often use Epics in the release planning process. [Epics are used to track groups of issues with the same theme](https://docs.gitlab.com/ee/user/group/epics/#epics). In the example below, an Epic was created for all the UI-related issues in a project.\n\n![Example of GitLab epic for frontend work](https://about.gitlab.com/images/blogimages/cd-solution-overview/epic.png)\nAn example of an Epic for frontend work in GitLab.\n\n[Iterations are a relatively new tool that allows users to track issues over time](https://docs.gitlab.com/ee/user/group/iterations/#iterations) and helps to track velocity and volatility metrics. Iterations can also be used with milestones and can track a project's sprints using the detailed iterations pages, which include many progress metrics.\n\n![Example iteration from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/iteration.png)\nThis screenshot shows an example of how iterations work in GitLab.\n\nThe [Roadmap tool assembles epics, milestones, and iterations in a timeline format](https://docs.gitlab.com/ee/user/group/roadmap/#roadmap), which makes it easier to visually track all progress toward a release and helps the user streamline the release process.\n\n![Example of roadmap from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/roadmap.png)\nThis screenshot shows an example of a roadmap in GitLab.\n\nGitLab offers many approval gates for your release. Set a [deploy freeze window](https://docs.gitlab.com/ee/ci/environments/deployment_safety.html) to temporarily suspend automated deployments to production. The deploy freeze window prevents unintended production releases during a particular time frame to help reduce uncertainty and risk of unscheduled outages.\n\n![Example of deploy freeze window from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/freeze.png)\nThis screenshot shows an example deploy freeze window in GitLab.\n\nRelated to the deploy freeze window, users can protect the production environment for a release to prevent unintentional releases. Protected environments do this by specifying who is allowed to deploy to the environment. 
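\n\nAs an aside, a deploy freeze like the one above can also be created programmatically through GitLab's Freeze Periods API. Here is a minimal sketch (the token and project ID are placeholders) that freezes deployments from Friday 23:00 until Monday 08:00 UTC:\n\n```\ncurl --request POST \\\n     --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" \\\n     --data-urlencode \"freeze_start=0 23 * * 5\" \\\n     --data-urlencode \"freeze_end=0 8 * * 1\" \\\n     --data-urlencode \"cron_timezone=UTC\" \\\n     \"https://gitlab.com/api/v4/projects/$PROJECT_ID/freeze_periods\"\n```\n\n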
Assigning specific roles and responsibilities streamlines the approval gates and release process.\n\n![protected-env](https://about.gitlab.com/images/blogimages/cd-solution-overview/protected-env.png)\n\nWhen it's ready, the [user can create the release which automatically generates the release evidence](https://docs.gitlab.com/ee/api/releases/#collect-release-evidence). This streamlined process helps reduce release cycle times.\n\n![Example of release evidence from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/release-evidence.png)\nHere is an example of release evidence from a demo project in GitLab.\n\n## Implement continuous delivery\n\nThe capabilities described above help to establish some best practices for software continuous delivery. In this next phase of the CD cycle, every change is automatically deployed to the User Acceptance Testing env/Staging (with a manual deployment to production). In this scenario, there is no need for a deploy freeze, and the release manager can cut a release from staging at any point in time.\n\n[GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) helps users automatically create the release pipeline and relieves them from manually creating a pipeline. With Auto DevOps, users can automatically deploy to the staging environment and manually deploy to production and enable canary deployments. Auto DevOps, which is based on DevOps best practices, helps you streamline the release process.\n\n![Example of enabling Auto DevOps from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/enable-auto-devops.png)\nHow to enable Auto DevOps in GitLab.\n\nThe first job in Auto DevOps is the build job, as shown below:\n\n![build-job](https://about.gitlab.com/images/blogimages/cd-solution-overview/build-job.png)\nThe build job in GitLab Auto DevOps.\n\nThe build job applies the appropriate build strategy to create a Docker image of the application and stores it in the built-in Docker Registry.\n\n![Example of container registry from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/container-registry.png)\nSee the example of a container registry in GitLab.\n\nFaster and more reliable releases happen when you have build components like Docker images that are consistent, uniform, and readily available throughout the release process. GitLab also includes a built-in [Package Registry](https://docs.gitlab.com/ee/user/packages/) that supports many packaging technologies.\n\n![Example of package registry from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/package-registry.png)\nHere's what the package registry looks like in GitLab.\n\n[Review Apps](https://docs.gitlab.com/ee/ci/review_apps/#review-apps) allow the user to visualize what features will go into production. As updates are made to the application via MRs, the MRs kick off Review Apps, which streamlines the review process, including the automatic creation and destruction of an ephemeral review environment. Using Review Apps, stakeholders can verify the updates to the application before the changes are merged to the main line. 
Review Apps help increase code quality, reducing the risk of unexpected production outages.\n\n![Example Review Apps from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/review-apps.png)\nAn example of Review Apps in GitLab from a demo project.\n\nOnce an application is built and passes many automated tests, checks, and verifications, the Auto DevOps pipeline automatically stands up a staging environment and deploys the application to staging.\n\n![Example staging environment from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/staging-env.png)\nAn example staging environment in GitLab.\n\nAt this point, a user can manually deploy the updated application as a canary deployment to the production environment. In doing so, a user ships features to only a portion of the pod fleet and watches their behavior as users visit the temporarily deployed feature. If everything checks out, the next step is to deploy the feature to production. After deploying to production, roll out the canary deployment to 50% of the production pods. Incremental rollouts lower the risk of production outages and deliver a better user experience and customer satisfaction. Advanced deployment techniques, like canary, incremental, and blue-green, also improve development and delivery efficiency and streamline the release process.\n\n![Example incremental rollout from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/rollout.png)\nHow incremental rollout works in GitLab.\n\n![live-env-button](https://about.gitlab.com/images/blogimages/cd-solution-overview/live-env-button.png)\nTo check the running application for integrity, you can click on the \"Open live environment\" button.\n\nClicking this button will open up the application in a different browser tab. But what if you run into an application error, as shown below?\n\n![Example application error from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/app-error.png)\nThis is what an application error will look like in GitLab.\n\nIf you encounter an app error, you could decide to perform a rollback by drilling down into the production environment page and identifying the release that had been running before the last deployment. This page is an auditable sequence of changes that have been applied to the production environment. The rollback process starts with the click of a button. Rollbacks speed up recovery of production in case of failures and lower outage times, which improves the user experience.\n\n![Example rollback from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/rollback.png)\nRollback in GitLab to speed up production recovery.\n\nPipelines usually run automatically, but to schedule a pipeline once a day at midnight, for example, so staging can have the most recent version of the application each day, go to CI/CD->Schedules. Scheduling pipelines can improve the efficiency of the development life cycle and release processes.\n\n![Example of pipeline scheduling from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/pipeline-sched.png)\nHow to schedule a pipeline to run in the future.\n\nWhile the application is running in production, track how the release is performing and quickly identify and troubleshoot any production issues. There are a few ways to do this. 
One way is to access the \"Monitoring\" feature for a specific environment to track system and application metrics, such as system and pod memory usage, and the number of cores used. The monitoring view includes markers (a small rocket icon) showing when updates were introduced to the environment, so that fluctuations in the metrics can be correlated to a specific update.\n\n![Example monitoring capabilities from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/monitoring.png)\nExplore monitoring capabilities in GitLab.\n\nMonitoring reduces the time to identify, resolve, and preempt production problems, which lowers the risk of unscheduled outages. It also provides an opportunity for monitoring business activity and optimizing cloud costs. This type of monitoring is not only useful to release managers but also to DevOps engineers, application operators, and platform engineers.\n\nAnother way to monitor the release is by creating alerts to detect out-of-range metrics, which are visible on the overall operations metrics dashboard as well as on each specific environment window. Alerts can also automatically trigger ChatOps and email messages to appropriate individuals or groups.\n\n![Example alerts from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/alerts.png)\nExample alerts in GitLab.\n\nYou can manage alerts from the [Operations Alerts window](https://docs.gitlab.com/ee/operations/incident_management/alerts.html), a single location from which you can assess and handle alerts, which may include the manual or automatic rollback of a release.\n\n![Example alerts dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/alerts-window.png)\nWhat the alerts dashboard looks like in GitLab.\n\nUsers can track and monitor the release progress through [Value Stream Analytics](https://docs.gitlab.com/ee/development/value_stream_analytics.html#value-stream-analytics-development-guide), where you can check your project or group statistics over time and see how your team improves in the number of new issues, commits, deploys, and deployment frequency. Value Stream Analytics is useful to quickly determine the velocity of a given project. It points to bottlenecks in the development process, allowing management to uncover, triage, and identify the root cause of slowdowns in the software development life cycle.\n\n![Example value stream analytics from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/value-stream.png)\nValue stream analytics in GitLab.\n\nLastly, another way to track and monitor the release is through [Pipeline analytics](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html#pipeline-success-and-duration-charts). Pipeline analytics shows the history of your pipeline successes and failures, as well as how long each pipeline runs. This helps explain the health of your projects and their continuous delivery.\n\n![Example pipeline analytics from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/pipeline-analytics.png)\nScreenshot shows example pipeline analytics in GitLab.\n\nThe [Operations dashboard](https://docs.gitlab.com/ee/user/operations_dashboard/#operations-dashboard) can contain more than one project, and allows users to oversee more than one release. 
This dashboard provides a summary of each project's operational health, including pipeline and alert status.\n\n![Example operations dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/ops-dashboard.png)\nExample of operations dashboard in GitLab.\n\nRelease managers can also access the [environments dashboard](https://docs.gitlab.com/ee/ci/environments/environments_dashboard.html#environments-dashboard), which provides a cross-project, environment-based view that lets you see the big picture of what is happening in each environment.\n\n![Example environments dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/env-dashboard.png)\nThe environments dashboard in GitLab.\n\nAnother option is to drill down into a specific environment to see all the updates applied to the environment.\n\n![Example production environment dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/prod-env-dashboard.png)\nThe production environment dashboard shows all updates applied to the environment.\n\nAll these dashboards offer operations insights that are necessary to understand how a release is performing in production and quickly identify and troubleshoot any production issues.\n\n## Implement continuous deployment\n\nThe third phase in the journey is continuous deployment, where users can send updates directly to production. Instead of manually triggering deployments, continuous deployment sends changes to production automatically (no human intervention is required). Teams can only achieve continuous deployment once continuous delivery is already in place.\n\nTo introduce a feature to a segment of end-users in a controlled manner in production, create [feature flags](/blog/feature-flags-continuous-delivery/). Feature flags help reduce risk and let the user conduct controlled tests and separate feature delivery from customer launch.\n\n![Example feature flag from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/feature-flag.png)\nFeature flags in GitLab.\n\nA project's audit events dashboard will record which user introduced a feature flag.\n\n![Example audit events dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/events-dashboard.png)\nScreenshot shows example audit events dashboard in GitLab.\n\nCheck security and compliance-related items of the project by visiting the [Security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#gitlab-security-dashboards-and-security-center).\n\n![Example security dashboard from demo project](https://about.gitlab.com/images/blogimages/cd-solution-overview/sec-dashboard.png)\nThe security dashboard in GitLab.\n\nThese dashboards help you preempt out-of-compliance scenarios to avoid penalties. They also streamline audits, provide an opportunity to optimize cost, and lower the risk of unscheduled production outages.\n\nWe have reviewed how GitLab can help you make your releases safe, low risk, worry-free, consistent, and repeatable.\n\nWhether you are just starting your journey into DevOps, or already in the midst of implementing DevOps processes, [GitLab's continuous delivery](/stages-devops-lifecycle/continuous-delivery/) can help you every step of the way with capabilities built on DevOps and CD best practices.\n\n## Watch and learn\n\nMore of a video person? 
Tune in below to see GitLab’s continuous delivery solution in action.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/L0OFbZXs99U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nFor more information, visit [LEARN@GITLAB](/learn/).\n",[937,894,2932],{"slug":4598,"featured":6,"template":678},"cd-solution-overview","content:en-us:blog:cd-solution-overview.yml","Cd Solution Overview","en-us/blog/cd-solution-overview.yml","en-us/blog/cd-solution-overview",{"_path":4604,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4605,"content":4611,"config":4615,"_id":4617,"_type":16,"title":4618,"_source":17,"_file":4619,"_stem":4620,"_extension":20},"/en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative",{"title":4606,"description":4607,"ogTitle":4606,"ogDescription":4607,"noIndex":6,"ogImage":4608,"ogUrl":4609,"ogSiteName":692,"ogType":693,"canonicalUrls":4609,"schema":4610},"How to use GitLab for Agile, CI/CD, GitOps, and more","Read our example engineering stories from the past two years that show how to use GitLab for your DevOps cycle, including GitOps, CI/CD, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681825/Blog/Hero%20Images/triangle_geo.jpg","https://about.gitlab.com/blog/gitlab-for-cicd-agile-gitops-cloudnative","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab for Agile, CI/CD, GitOps, and more\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-12-17\",\n      }",{"title":4606,"description":4607,"authors":4612,"heroImage":4608,"date":4594,"body":4613,"category":14,"tags":4614},[3676],"\n\nOn this blog, our community frequently shares tips, tricks, stories, and tutorials that demonstrate how to do different things with GitLab. This collection features some of our most popular and enduring how-to blog posts from the past two years, covering [CI/CD](/topics/ci-cd/), GitOps, machine learning, and more! See how various team members, companies, and users leverage GitLab to deliver software faster and more efficiently by reading and watching some of the tutorials we've featured.\n\n## Code review with GitLab\n\nWe know that code review is essential to effective collaboration, but the logistics of it all can be challenging. [Master code review by watching the demo](/blog/demo-mastering-code-review-with-gitlab/) included with this blog post.\n\n## Cool ways to use GitLab CI/CD\n\n### The basics of CI/CD\n\nBrand new to CI/CD? 
Read our [beginner's guide to the vocabulary and concepts](/blog/beginner-guide-ci-cd/).\n\nHere’s the [code you’ll need to build a CI/CD pipeline](/blog/how-to-create-ci-cd-pipeline-with-autodeploy-to-kubernetes-using-gitlab-and-helm/) with AutoDeploy to Kubernetes, using GitLab and Helm.\n\nNext, find the [code you'll need to build a CI pipeline with GitLab](/blog/basics-of-gitlab-ci-updated/), allowing you to run jobs sequentially, in parallel, or out of order.\n\n### Pipelines with CI/CD\n\nLearn how to [build a CI/CD pipeline in 20 minutes (or less) using GitLab’s AutoDevOps](/blog/building-a-cicd-pipeline-in-20-mins/) capabilities by following the instructions in this blog post, which is based on a popular GitLab Commit Brooklyn presentation that you can watch below.\n\nDiscover [how to trigger pipelines across multiple projects](/blog/cross-project-pipeline/) using GitLab CI/CD.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/-shvwiBwFVI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### CI/CD with Android\n\nAndroid project users are in luck because in [this post we explain how to set up GitLab continuous integration (CI) functions](/blog/setting-up-gitlab-ci-for-android-projects/) in Android projects.\n\nGitLab and fastlane pair up to [help users publish applications to the iOS store](/blog/ios-publishing-with-gitlab-and-fastlane/) using a GitLab CI/CD runner.\n\n### CI/CD and GKE\n\n![GitLab CI/CD and GKE integration](https://about.gitlab.com/images/blogimages/gitlab-gke-integration-cover.png){: .shadow.medium.center}\n\nWe explain [how to get started with GitLab CI/CD and Google Kubernetes Engine (GKE)](/blog/getting-started-gitlab-ci-gcp/) in this initial demo.\n\nGitLab self-managed user? ✅\nUsing Google Kubernetes engine? ✅\nGreat! The [next tutorial is all about how to use GitLab CI to install GitLab runners on GKE](/blog/gitlab-ci-on-google-kubernetes-engine/) using our integration. It shouldn’t take you more than 15 minutes.\n\n## GitLab for machine learning\n\nBut what about GitLab for machine learning? We’ve got you covered. 
Watch the demo from GitLab Virtual Commit to see how you can use GitLab to leverage tasks for machine learning pipelines.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/DJbQJDXmjew\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab for Agile\n\nGitLab features work for many software development methodologies, including [Agile](/solutions/agile-delivery/).\n\nStart by [mapping Agile artifacts to GitLab features](/blog/gitlab-for-agile-software-development/) and explore how iteration works using GitLab.\n\n![GitLab issue board](https://about.gitlab.com/images/blogimages/issue-board.png){: .shadow.medium.left}\n\nThe GitLab issue board allows for flexible workflows and can be organized to represent [Agile software development](/topics/agile-delivery/) states.\n{: .note.text-center}\n\nThen go more in-depth to learn [how to use GitLab for Agile portfolio planning and project management](/blog/gitlab-for-agile-portfolio-planning-project-management/).\n\n## Giddy for GitOps?\n\n[GitOps](/topics/gitops/) takes DevOps best practices that are used for application development such as [version control](/topics/version-control/), collaboration, compliance, and CI/CD, and applies them to infrastructure automation.\n\nGitLab is the [DevOps platform](/topics/devops/) that does it all, and it’s built using Git, making it the ideal solution for GitOps processes.\n\nFirst, we explained [how GitLab and Ansible can be used together for GitOps](/blog/using-ansible-and-gitlab-as-infrastructure-for-code/) processes and [infrastructure as code](/topics/gitops/infrastructure-as-code/). In a follow-up post, we explain how [GitLab can also be paired with Terraform for GitOps](/topics/gitops/gitlab-enables-infrastructure-as-code/) and IaC.\n\nThe video on how to use Ansible and GitLab together has been viewed more than 13,000 times since it was first created in 2019, and is embedded for you below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/M-SgRTKSeOg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Visibility\n\nOne of our principles at GitLab is to [dogfood everything](/handbook/engineering/development/principles#dogfooding), so you can rest assured that we aren’t about to introduce an engineering feature without first trying it out for ourselves. When it comes to our Insights tool though, the process happened in reverse. Our Engineering Productivity team at GitLab needed a particular tool, and as we built it, we realized it would benefit our GitLab Ultimate customers. 
Read on to [learn how our Insights tool came to be](/blog/insights/).\n\nDig into this [valuable explanation of how we discovered that Prometheus query language can be used to detect anomalies](/blog/anomaly-detection-using-prometheus/) in the time-series data that GitLab.com reports.\n\n## In the clouds\n\nWatch the demo to learn how GitLab Runner and Red Hat OpenShift can work together to jump start your application development and deployment to the cloud.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/yGWiQwrWimk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd finally, although Docker Hub may be enforcing new rate limits, there's no need to panic. We [explain how to build a monitoring plug-in](/blog/docker-hub-rate-limit-monitoring/) to help you monitor the number of image pulls.\n\nCan you think of some other stand-out blog posts or demos that we should include here? Drop the link in a comment below.\n\nCover image by [Chris Robert](https://unsplash.com/@chris_robert) on [Unsplash](https://unsplash.com/photos/kY-uPDLXxHg)\n{: .note}\n",[832,937,915,2932],{"slug":4616,"featured":6,"template":678},"gitlab-for-cicd-agile-gitops-cloudnative","content:en-us:blog:gitlab-for-cicd-agile-gitops-cloudnative.yml","Gitlab For Cicd Agile Gitops Cloudnative","en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative.yml","en-us/blog/gitlab-for-cicd-agile-gitops-cloudnative",{"_path":4622,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4623,"content":4629,"config":4635,"_id":4637,"_type":16,"title":4638,"_source":17,"_file":4639,"_stem":4640,"_extension":20},"/en-us/blog/deploy-aws",{"title":4624,"description":4625,"ogTitle":4624,"ogDescription":4625,"noIndex":6,"ogImage":4626,"ogUrl":4627,"ogSiteName":692,"ogType":693,"canonicalUrls":4627,"schema":4628},"How to deploy to AWS with GitLab","We believe deploying to the cloud should be easy and boring. The deployment process is the same regardless of what tech stack you're using so why not automate it?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672124/Blog/Hero%20Images/aws_rocket.jpg","https://about.gitlab.com/blog/deploy-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy to AWS with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-12-15\",\n      }",{"title":4624,"description":4625,"authors":4630,"heroImage":4626,"date":4632,"body":4633,"category":14,"tags":4634},[4631],"Orit Golowinski","2020-12-15","\nCloud computing services are replacing traditional hardware technologies at an extremely fast pace. The majority of businesses worldwide are already moving their applications to the cloud — both public and private cloud — or plan to in the near future. Over a short period of time, this technology took over the market as businesses preferred remote access to data as well as the cloud's scalability, economy, and reach.\n\n## AWS Deployment: deploying applications to the cloud\n\nCOVID-19 and the resulting trend toward remote work forced organizations to adopt cloud technologies even if they hadn’t planned to originally. Software deployment to the cloud has also increased. 
Cloud is no longer just virtual machines: organizations are driving the use of [Containers as a Service (CaaS)](https://searchitoperations.techtarget.com/definition/Containers-as-a-Service-CaaS) due to their growing interest in leveraging containers to ease development and testing, speed up deployment, scale operations, and increase the efficiency of workloads running in the cloud.\n\nSince deployment to the cloud has become a standard practice, at GitLab we want to make this repeatable and [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions). In this blog post, we explain how we've made it easier to deploy to Amazon Web Services (AWS) as part of your deployment process. We invite users to replicate this example to deploy to other cloud providers in a similar way.\n\nSince we want cloud deployment to be as flexible as possible (similar to a microservices architecture), we constructed atomic Docker images that function as building blocks. Users can use these images as part of their custom `.gitlab-ci.yml` file or use our predefined `.gitlab-ci.yml` templates. We also added the ability to use [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) with the new AWS deployment targets.\n\n## AWS Deployment: how to use GitLab's official AWS Docker images\n\n### AWS CLI Docker image\nIn [GitLab 12.6](/releases/2019/12/22/gitlab-12-6-released/), we provided an official GitLab [AWS cloud-deploy](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/cloud_deploy/Dockerfile) Docker image that downloads and installs the [AWS CLI](https://aws.amazon.com/cli/). This allows users to run `aws` commands directly from their pipelines. For more information, see [Run AWS commands from GitLab CI/CD](https://docs.gitlab.com/ee/ci/cloud_deployment/#run-aws-commands-from-gitlab-cicd).\n\n### CloudFormation stack creation Docker image\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we provided a Docker image that runs a script that [creates a stack with CloudFormation](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/src/bin/gl-cloudformation). The `gl-cloudformation create-stack` command uses [aws cloudformation create-stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) behind the scenes. A JSON file based on the CloudFormation template must be passed to that command. For an example of this type of JSON file, see [`cf_create_stack.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/cf_create_stack.json). With this type of JSON file, the command creates the infrastructure on AWS, including an EC2 instance, directly from the `.gitlab-ci.yml` file. The script exits once we get confirmation that the stack setup is complete or has failed (through periodic polling).\n\n### Push to S3 and Deploy to EC2 Docker image\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/) we also provided a Docker image with [Push to S3 and Deploy to EC2 scripts](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/src/bin/gl-ec2). The `gl-ec2 push-to-s3` script pushes source code to an S3 bucket. For an example of the JSON file to pass to the `aws deploy push` command, see [`s3_push.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/s3_push.json). This code can be whatever artifact is built from a preceding build job. The `gl-ec2 deploy-to-ec2` script uses `aws deploy create-deployment` behind the scenes to create a deployment to an EC2 instance directly from the `.gitlab-ci.yml` file. 
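\n\nTo give a feel for how these helpers slot into a pipeline, here is a minimal sketch of the script section of a deployment job using the cloud-deploy image (the job structure around it is an assumption, not the template itself):\n\n```\n# Runs inside a CI job whose image is the cloud-deploy AWS image.\ngl-ec2 push-to-s3      # pushes the built artifact to your S3 bucket\ngl-ec2 deploy-to-ec2   # creates the CodeDeploy deployment to your EC2 instance\n```\n\n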
\n\n## AWS Deployment: using GitLab CI templates to deploy to AWS\n\n### How to deploy to Elastic Container Service (ECS) with GitLab\nIn [GitLab 12.9](/releases/2020/03/22/gitlab-12-9-released/), we created a full `.gitlab-ci.yml` template called [`Deploy-ECS.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Deploy/ECS.gitlab-ci.yml) that deploys to Amazon ECS and extends support for Fargate. Users can include the template in their configuration, specify a few variables, and their application will be deployed and ready to go in no time. This template can be customized for your specific needs, for example, by replacing the selected container registry or changing the path of the file location.\n\n### How to deploy to Elastic Compute Cloud (EC2) with GitLab\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we created a full `.gitlab-ci.yml` template called [`CF-Provision-and-Deploy-EC2.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Deploy/EC2.gitlab-ci.yml) that provisions the infrastructure by leveraging [AWS CloudFormation](https://aws.amazon.com/cloudformation/). It then pushes your previously built artifact to an [AWS S3 bucket](https://aws.amazon.com/s3/) and deploys the pushed content to [AWS EC2](https://aws.amazon.com/ec2/).\n\n## AWS Deployment: security considerations\n\n### Predefined AWS CI/CD variables\n\nIn order to deploy to AWS, you must use AWS security keys to connect to your AWS instance. Users can define these security keys as [CI/CD environment](/topics/ci-cd/) variables that can be used by the deployment pipeline.\n\nIn [GitLab 12.9](/releases/2020/03/22/gitlab-12-9-released/), we added support for predefined AWS variables. This helps users know which variables are required for deploying to AWS and also prevents typos and spelling mistakes.\n\n| Env. variable name | Value |\n| --- | --- |\n| `AWS_ACCESS_KEY_ID` | Your access key ID |\n| `AWS_SECRET_ACCESS_KEY` | Your secret access key |\n| `AWS_DEFAULT_REGION` | Your region code |\n\n### \"Just-in-time\" guidance for AWS deployments\n\n[GitLab 13.1](/releases/2020/06/22/gitlab-13-1-released/) provides just-in-time guidance for users who wish to deploy to AWS. Setting up AWS deployments isn't always as easy as we'd like it to be, so we've added in-product links to our AWS templates and documentation when you start adding AWS CI/CD variables, to make it easier for you to use our AWS features. This will help you get up and running faster.\n\n![In-product guidance for AWS](https://about.gitlab.com/images/blogimages/aws_guide.png)\n\nAWS guide from CI/CD variables\n\n### Added security for GitLab's official AWS Docker images\n\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we changed the image identifier from the release version number to the Docker image digest. Docker supports immutable image identifiers and we adopted this best practice to update our cloud-deploy images. When a new image is tagged, we also programmatically retrieve the image digest upon its build and create a release note to effectively communicate this digest to users. This guarantees that every instance of the service runs exactly the same code. You can roll back to an earlier version of the image, even if that version wasn't tagged (or is no longer tagged). This can even prevent race conditions if a new image is pushed while a deploy is in progress.
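\n\nIf you pin one of these images in your own pipeline, you can reference it by digest rather than by tag. A minimal sketch, assuming the `aws-base` image path used by the cloud-deploy project (replace the placeholder with a digest from an actual release note):\n\n```yaml\n# Hypothetical job pinning a cloud-deploy image by digest.\ndeploy:\n  image: registry.gitlab.com/gitlab-org/cloud-deploy/aws-base@sha256:\u003Cdigest-from-release-note>\n  script:\n    - aws --version\n```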
\n\n![Docker Image Digest](https://about.gitlab.com/images/blogimages/digest1.png)\n\nDocker image digest or release tag\n\n## AWS Deployment: Auto DevOps support\n\nGitLab already supports Kubernetes users deploying to an AWS EKS cluster. Click the link to read instructions about [how to deploy an application to a GitLab-managed Amazon EKS cluster with Auto DevOps](/blog/deploying-application-eks/#:~:text=The%20Auto%20DevOps%20function%20at,build%2C%20and%20deploy%20your%20application).\n\nWe also expanded Auto DevOps to support non-Kubernetes users. Users can specify their deployment target by adding the `AUTO_DEVOPS_PLATFORM_TARGET` variable under the CI/CD variables settings. Specifying the deployment target platform builds a full CI/CD pipeline that deploys to AWS targets.\n\nWe currently support:\n\n- `AUTO_DEVOPS_PLATFORM_TARGET: ECS` (added in GitLab 13.0)\n- `AUTO_DEVOPS_PLATFORM_TARGET: FARGATE` (added in GitLab 13.2)\n- `AUTO_DEVOPS_PLATFORM_TARGET: EC2` (added in GitLab 13.6)\n\nFor more information about Auto DevOps for AWS targets, see the [requirements for Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/requirements.html) documentation.\n\nHere's a quick recording of how to use Auto Deploy to Amazon ECS:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/HzRhLLFlAos\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nA speed run on how to use Auto Deploy to EC2 (animation):\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/rVr-vZfNL6U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## AWS Deployment: future plans to extend deployment support via GitLab\n\nCheck out some of the open issues below to see what our plans are for the future of deploying to AWS using GitLab.\n\n- [Show AWS deployment success code in logs](https://gitlab.com/gitlab-org/gitlab/-/issues/215333): This will bring the success/failure codes from AWS into your GitLab pipeline logs, allowing you to see the deployment success code without needing to go into the AWS console to retrieve the logs.\n- [Show AWS deployment success code in pipeline view](https://gitlab.com/gitlab-org/gitlab/-/issues/232983): This will bring the success/failure codes from AWS into your GitLab pipeline, allowing you to see if the deployment job was successful in one view.\n- [Auto Deploy to AWS S3](https://gitlab.com/gitlab-org/gitlab/-/issues/219087): This will expand the supported deployment targets covered in this blog to include [S3 buckets](https://aws.amazon.com/s3/) as well.\n- [AWS integration per-environment role management](https://gitlab.com/gitlab-org/gitlab/-/issues/27107): This returns a set of temporary security credentials you can use to access AWS resources that you normally might not be able to access. 
This is accomplished by using [AWS IAM](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) roles.\n\n## More material on deploying to EKS and Lambda\n\n- [Demo of how to deploy to EKS](https://docs.google.com/presentation/d/1iXnB6lvTx2_-_0ASElLUDZwyFPWILCRx54XjJkMFuw0/edit#slide=id.g6bb36a7017_2_42).\n- [Whitepaper on how to deploy on AWS from GitLab](/resources/whitepaper-deploy-aws-gitlab/).\n\nWe invite you to contribute to our other cloud provider solutions:\n\n- [Streamline GCP deployments](https://gitlab.com/groups/gitlab-org/-/epics/2706).\n- [Streamline Azure deployments](https://gitlab.com/groups/gitlab-org/-/epics/4846).\n\nAt GitLab, [everyone can contribute](/company/strategy/#contribute-with-gitlab). If you want to deploy to a target that isn't mentioned in this post, please let us know by adding an issue and linking it to our [Natively support hypercloud deployments](https://gitlab.com/groups/gitlab-org/-/epics/1804) epic.\n\nCover image by [SpaceX](https://unsplash.com/photos/uj3hvdfQujI) on [Unsplash](https://www.unsplash.com)\n",[873,894,832,937],{"slug":4636,"featured":6,"template":678},"deploy-aws","content:en-us:blog:deploy-aws.yml","Deploy Aws","en-us/blog/deploy-aws.yml","en-us/blog/deploy-aws",{"_path":4642,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4643,"content":4649,"config":4654,"_id":4656,"_type":16,"title":4657,"_source":17,"_file":4658,"_stem":4659,"_extension":20},"/en-us/blog/merge-trains-explained",{"title":4644,"description":4645,"ogTitle":4644,"ogDescription":4645,"noIndex":6,"ogImage":4646,"ogUrl":4647,"ogSiteName":692,"ogType":693,"canonicalUrls":4647,"schema":4648},"How to use merge train pipelines with GitLab","Read here an introduction on what merge trains are, how to use them and how to incorporate them to your GitLab project.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667210/Blog/Hero%20Images/merge-train-explained-banner.jpg","https://about.gitlab.com/blog/merge-trains-explained","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use merge train pipelines with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2020-12-14\",\n      }",{"title":4644,"description":4645,"authors":4650,"heroImage":4646,"date":4651,"body":4652,"category":14,"tags":4653},[1140],"2020-12-14","This blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2021-01-20.\n{: .alert .alert-info .note}\n\n[Merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) is a powerful GitLab feature that empowers users to harness the full potential of [pipelines for merge results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) and automatically merge a series of (queued) merge requests (MRs) without breaking the target branch. However, due to the structural complexity of the concept, users are often unable to use it effectively for their projects and play it safe by restricting their usage to MRs that pose minimal or no conflict with the target branch.\n\nAs a [senior product designer for Continuous Integration (CI)](/company/team/#veethikaa), I often deconstruct certain concepts and logic for features related to CI so that I have a strong foundation of understanding when making design proposals. 
Recently, I had a chance to hold a discussion with the team around a very interesting feature: merge trains. This post unpacks the concept of merge trains by explaining the difference between merge trains, pipelines for MRs, and pipelines for merge results.\n\n## Pipelines for merge requests\n\nGenerally, when a new merge request is created, a pipeline runs to check if the new changes are eligible to be merged to the target branch. This is called the pipeline for merge requests (MRs). A good practice is to keep only the jobs necessary for validating the changes at this step, so the pipeline doesn’t take a long time to complete and CI minutes are not overused. GitLab allows users to [configure the pipeline for MRs](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) by adding `rules:if: $CI_MERGE_REQUEST_IID` to the jobs they wish to run for MRs.
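\n\nFor illustration, a job scoped to run only in merge request pipelines could look like this minimal sketch (the job name and script are made up):\n\n```yaml\n# Hypothetical job that runs only in merge request pipelines.\nunit-tests:\n  script: ./run-unit-tests.sh\n  rules:\n    - if: $CI_MERGE_REQUEST_IID\n```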
\n\n![Pipeline for merge request](https://about.gitlab.com/images/blogimages/merge-train-explained-pipeline-for-merge-requests.jpg)\n\n### Pipelines for merge results\n\nMerge request pipelines verify the branch in isolation. The target branch may change several times during the lifetime of the MR, and these changes are not taken into consideration. While the pipeline for the MR runs (and succeeds), the target branch may progress in the background; if a user merges the changes to the target branch at that point, they might end up with a broken target.\n\nWhen a [pipeline for merge results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) runs, GitLab CI performs a _pretend_ merge against the updated target branch by creating a commit on an internal ref from the source branch, and then runs a pipeline against it. This pipeline validates the result prior to merging, therefore increasing the chances of keeping the target branch green.\n\n![Pipeline for merge results](https://about.gitlab.com/images/blogimages/merge-train-explained-pipeline-for-merge-results.jpg)\n\nWe should keep in mind that this pipeline does not run automatically with every update to the target branch. To learn more about this feature in detail and understand the process of enabling it in your GitLab instance, you can refer to the [official documentation on merge results](https://docs.gitlab.com/ee/ci/pipelines/merged_results_pipelines.html).\n\nHowever, if a long time has passed since the last successful pipeline ran, by the time the MR is ready to be merged, the target branch may have already changed and advanced. If we go ahead and merge your MR without re-running the pipeline for MRs, we could end up with a broken target branch. Merge trains can prevent this from happening.\n\n### About merge trains\n\nPipeline for merge results is an extremely useful feature in itself, but tracking the right slot to merge the feature branch into the target and remembering to run the pipeline manually before doing so is a lot to expect from a developer buried in tasks that involve deep logical thinking.\n\nTo tackle this complexity in workflow, GitLab introduced [the merge trains feature](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) in [GitLab Premium 12.0](/releases/2019/06/22/gitlab-12-0-released/#sequential-merge-trains). Merge trains allow users to capitalize on the capabilities of pipelines for merge results to automate the process of merging to the target branch with minimal chances of breaking it.\n\nWith merge trains enabled, a merge request can be added to the train, which takes care of it until it is merged.\nA merge train can be imagined as a queue of MRs that is automatically managed for you.\n\n#### How do merge trains work?\n\nWhen users queue up their MRs in a merge train, GitLab performs a pretend merge for each source branch on top of the previous branch in the queue, where the first branch on the train is merged against the target branch.\nBy creating a temporary commit for each of these merges, GitLab can run merged result pipelines.\nThe first MR in the queue, after having a successful pipeline run, gets merged to the target branch.\n\nEvery time a merge request is merged into the target branch, the pipelines for the MRs remaining in the train run against the target branch plus the changes from the recently merged MR and from the MRs ahead of them in the train.\n\n![How merge trains work](https://about.gitlab.com/images/blogimages/merge-train-explained-working.gif)\n\nMerge trains carry an immense possibility for innovation with GitLab as a toolchain. But to be able to build upon the concept, it is imperative to have a holistic understanding of it at the system level.\n\nHopefully, this post does the job of breaking down the concept into layman's terms, thereby opening doors for future collaboration within [stage groups](/handbook/product/categories/) at GitLab.\n\nHave suggestions around improving merge trains? Please leave your thoughts on this [epic](https://gitlab.com/groups/gitlab-org/-/epics/5122).\n",[832,937,749,894,915],{"slug":4655,"featured":6,"template":678},"merge-trains-explained","content:en-us:blog:merge-trains-explained.yml","Merge Trains Explained","en-us/blog/merge-trains-explained.yml","en-us/blog/merge-trains-explained",{"_path":4661,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4662,"content":4668,"config":4675,"_id":4677,"_type":16,"title":4678,"_source":17,"_file":4679,"_stem":4680,"_extension":20},"/en-us/blog/basics-of-gitlab-ci-updated",{"title":4663,"description":4664,"ogTitle":4663,"ogDescription":4664,"noIndex":6,"ogImage":4665,"ogUrl":4666,"ogSiteName":692,"ogType":693,"canonicalUrls":4666,"schema":4667},"Running CI jobs in sequential, parallel, and custom orders","New to continuous integration? 
Learn how to build your first CI pipeline with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662061/Blog/Hero%20Images/cicdcover.png","https://about.gitlab.com/blog/basics-of-gitlab-ci-updated","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The basics of CI: How to run jobs sequentially, in parallel, or out of order\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2020-12-10\",\n      }",{"title":4669,"description":4664,"authors":4670,"heroImage":4665,"date":4671,"body":4672,"category":14,"tags":4673,"updatedDate":4674},"The basics of CI: How to run jobs sequentially, in parallel, or out of order",[1019],"2020-12-10","Let's assume that you don't know anything about [continuous integration (CI)](/topics/ci-cd/) and [why it's needed](/blog/how-to-keep-up-with-ci-cd-best-practices/) in the software development lifecycle.\n\nImagine that you work on a project where all the code consists of two text files. Moreover, it is super critical that the concatenation of these two files contains the phrase \"Hello world\".\n\nIf it's not there, the whole development team won't get paid that month. Yeah, it is that serious!\n\nThe most responsible software developer wrote a small script to run every time we are about to send our code to customers.\n\nThe code is pretty sophisticated:\n\n```bash\ncat file1.txt file2.txt | grep -q \"Hello world\"\n```\n\nThe problem is that there are 10 developers on the team, and, you know, human factors can hit hard.\n\nA week ago, a new guy forgot to run the script and three clients got broken builds. So you decided to solve the problem once and for all. Luckily, your code is already on GitLab, and you remember that there is [built-in CI](/solutions/continuous-integration/). Moreover, you heard at a conference that people use CI to run tests...\n\n## Let's run our first test inside CI\n\nAfter taking a couple of minutes to find and read the docs, it seems like all we need is these two lines of code in a file called `.gitlab-ci.yml`:\n\n```yaml\ntest:\n  script: cat file1.txt file2.txt | grep -q 'Hello world'\n```\n\nWe commit it, and hooray! Our build is successful:\n\n![build succeeded](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/build_succeeded.png)\n\nLet's change \"world\" to \"Africa\" in the second file and check what happens:\n\n![build failed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/build_failed.png)\n\nThe build fails as expected!\n\nOK, we now have automated tests here! GitLab CI will run our test script every time we push new code to the source code repository in the DevOps environment.\n\n**Note:** In the above example, we assume that `file1.txt` and `file2.txt` exist on the runner host.\n\nTo run this example in GitLab, use the code below, which first creates the files and then runs the script:\n\n```yaml\ntest:\n  before_script:\n    - echo \"Hello \" | tr -d \"\\n\" > file1.txt\n    - echo \"world\" > file2.txt\n  script: cat file1.txt file2.txt | grep -q 'Hello world'\n```\n\nFor the sake of compactness, we will assume that these files exist on the host, and will not create them in the following examples.\n\n## Make results of builds downloadable\n\nThe next business requirement is to package the code before sending it to our customers. 
Let's automate that part of the software development process as well!\n\nAll we need to do is define another job for CI. Let's name the job \"package\":\n\n```yaml\ntest:\n  script: cat file1.txt file2.txt | grep -q 'Hello world'\n\npackage:\n  script: cat file1.txt file2.txt | gzip > packaged.gz\n```\n\nWe have two tabs now:\n\n![Two tabs - generated from two jobs](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/two_tabs.png)\n\nHowever, we forgot to specify that the new file is a build _artifact_, so that it could be downloaded. We can fix it by adding an `artifacts` section:\n\n```yaml\ntest:\n  script: cat file1.txt file2.txt | grep -q 'Hello world'\n\npackage:\n  script: cat file1.txt file2.txt | gzip > packaged.gz\n  artifacts:\n    paths:\n    - packaged.gz\n```\n\nLet's check whether it is there:\n\n![Checking the download button](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/artifacts.png)\n\nPerfect, it is! However, we have a problem to fix: The jobs are running in parallel, but we do not want to package our application if our tests fail.\n\n## Run jobs sequentially\n\nWe only want to run the 'package' job if the tests are successful. Let's define the order by specifying `stages`:\n\n```yaml\nstages:\n  - test\n  - package\n\ntest:\n  stage: test\n  script: cat file1.txt file2.txt | grep -q 'Hello world'\n\npackage:\n  stage: package\n  script: cat file1.txt file2.txt | gzip > packaged.gz\n  artifacts:\n    paths:\n    - packaged.gz\n```\n\nThat should be good!\n\nAlso, we forgot to mention that compilation (which is represented by concatenation in our case) takes a while, so we don't want to run it twice. Let's define a separate stage for it:\n\n```yaml\nstages:\n  - compile\n  - test\n  - package\n\ncompile:\n  stage: compile\n  script: cat file1.txt file2.txt > compiled.txt\n  artifacts:\n    paths:\n    - compiled.txt\n\ntest:\n  stage: test\n  script: cat compiled.txt | grep -q 'Hello world'\n\npackage:\n  stage: package\n  script: cat compiled.txt | gzip > packaged.gz\n  artifacts:\n    paths:\n    - packaged.gz\n```\n\nLet's take a look at our artifacts:\n\n![Unnecessary artifact](https://about.gitlab.com/images/blogimages/the-basics-of-gitlab-ci/clean-artifacts.png)\n\nHmm, we do not need the intermediate `compiled.txt` file to be downloadable. Let's make our temporary artifacts expire by setting `expire_in` to '20 minutes':\n\n```yaml\ncompile:\n  stage: compile\n  script: cat file1.txt file2.txt > compiled.txt\n  artifacts:\n    paths:\n    - compiled.txt\n    expire_in: 20 minutes\n```\n\nNow our config looks pretty impressive:\n\n- We have three sequential stages to compile, test, and package our application.\n- We pass the compiled app to the next stages so that there's no need to run compilation twice (so it will run faster).\n- We store a packaged version of our app in build artifacts for further usage.\n\n## Learning which Docker image to use\n\nSo far, so good. However, it appears our builds are still slow. Let's take a look at the logs.\n\n![ruby3.1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/ruby-31.png)\n\nWait, what is this? Ruby 3.1?\n\nWhy do we need Ruby at all? Oh, GitLab.com uses Docker images to [run our builds](/blog/shared-runners/), and [by default](https://docs.gitlab.com/ee/user/gitlab_com/#shared-runners) it uses the [`ruby:3.1`](https://hub.docker.com/_/ruby/) image. For sure, this image contains many packages we don't need. 
After a minute of Googling, we figure out that there's an image called [`alpine`](https://hub.docker.com/_/alpine/), which is an almost blank Linux image.\n\nOK, let's explicitly specify that we want to use this image by adding `image: alpine` to `.gitlab-ci.yml`.\n\nNow we're talking! We shaved nearly three minutes off:\n\n![Build speed improved](https://about.gitlab.com/images/blogimages/the-basics-of-gitlab-ci/speed.png)\n\nIt looks like there are a lot of public images around:\n- [mysql](https://hub.docker.com/_/mysql/)\n- [Python](https://hub.docker.com/_/python/)\n- [Java](https://hub.docker.com/_/java/)\n- [php](https://hub.docker.com/_/php/)\n\nSo we can just grab one for our technology stack. It makes sense to specify an image that contains no extra software because it minimizes download time.\n\n## Dealing with complex scenarios\n\nSo far, so good. However, let's suppose we have a new client who wants us to package our app into a `.iso` image instead of a `.gz` archive. Since CI does all the work, we can just add one more job to it. ISO images can be created using the [mkisofs](http://www.w3big.com/linux/linux-comm-mkisofs.html) command. Here's how our config should look:\n\n```yaml\nimage: alpine\n\nstages:\n  - compile\n  - test\n  - package\n\n# ... \"compile\" and \"test\" jobs are skipped here for the sake of compactness\n\npack-gz:\n  stage: package\n  script: cat compiled.txt | gzip > packaged.gz\n  artifacts:\n    paths:\n    - packaged.gz\n\npack-iso:\n  stage: package\n  script:\n  - mkisofs -o ./packaged.iso ./compiled.txt\n  artifacts:\n    paths:\n    - packaged.iso\n```\n\nNote that job and stage names don't have to be the same. In fact, if two jobs inside one stage had the same name, it wouldn't be possible to make them run in parallel inside that stage. Hence, think of matching job and stage names as a coincidence.\n\nAnyhow, the build is failing:\n\n![Failed build because of missing mkisofs](https://about.gitlab.com/images/blogimages/the-basics-of-gitlab-ci/mkisofs.png)\n\nThe problem is that `mkisofs` is not included in the `alpine` image, so we need to install it first.\n\n## Dealing with missing software/packages\n\nAccording to the [Alpine Linux website](https://pkgs.alpinelinux.org/contents?file=mkisofs&path=&name=&branch=edge&repo=&arch=), `mkisofs` is part of the `xorriso` and `cdrkit` packages. These are the magic commands that we need to run to install a package:\n\n```bash\necho \"ipv6\" >> /etc/modules  # enable networking\napk update                   # update packages list\napk add xorriso              # install package\n```\n\nFor CI, these are just like any other commands. The full list of commands we need to pass to the `script` section should look like this:\n\n```yaml\nscript:\n- echo \"ipv6\" >> /etc/modules\n- apk update\n- apk add xorriso\n- mkisofs -o ./packaged.iso ./compiled.txt\n```\n\nHowever, to make it semantically correct, let's put the commands related to package installation in `before_script`. Note that if you use `before_script` at the top level of a configuration, then the commands will run before all jobs. In our case, we just want it to run before one specific job.
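\n\nScoped to just the `pack-iso` job, that looks like this (the same block appears again in the final config below):\n\n```yaml\npack-iso:\n  stage: package\n  before_script:\n  - echo \"ipv6\" >> /etc/modules\n  - apk update\n  - apk add xorriso\n  script:\n  - mkisofs -o ./packaged.iso ./compiled.txt\n  artifacts:\n    paths:\n    - packaged.iso\n```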
\n\n## Directed Acyclic Graphs: Get faster and more flexible pipelines\n\nWe defined stages so that the package jobs will run only if the tests passed. What if we want to break the stage sequencing a bit, and run a few jobs earlier, even if they are defined in a later stage? In some cases, the traditional stage sequencing might slow down the overall pipeline execution time.\n\nImagine that our test stage includes a few more heavy tests that take a lot of time to execute, and that those tests are not necessarily related to the package jobs. In this case, it would be more efficient if the package jobs don't have to wait for those tests to complete before they can start. This is where Directed Acyclic Graphs (DAG) come in: To break the stage order for specific jobs, you can define job dependencies that skip the regular stage order.\n\nGitLab has a special keyword `needs`, which creates dependencies between jobs and allows jobs to run earlier, as soon as the jobs they depend on complete.\n\nIn the example below, the pack jobs will start running as soon as the test job completes, so if, in the future, someone adds more tests to the test stage, the package jobs will start running before those new test jobs complete:\n\n```yaml\npack-gz:\n  stage: package\n  script: cat compiled.txt | gzip > packaged.gz\n  needs: [\"test\"]\n  artifacts:\n    paths:\n    - packaged.gz\n\npack-iso:\n  stage: package\n  before_script:\n  - echo \"ipv6\" >> /etc/modules\n  - apk update\n  - apk add xorriso\n  script:\n  - mkisofs -o ./packaged.iso ./compiled.txt\n  needs: [\"test\"]\n  artifacts:\n    paths:\n    - packaged.iso\n```\n\nOur final version of `.gitlab-ci.yml`:\n\n```yaml\nimage: alpine\n\nstages:\n  - compile\n  - test\n  - package\n\ncompile:\n  stage: compile\n  before_script:\n    - echo \"Hello \" | tr -d \"\\n\" > file1.txt\n    - echo \"world\" > file2.txt\n  script: cat file1.txt file2.txt > compiled.txt\n  artifacts:\n    paths:\n    - compiled.txt\n    expire_in: 20 minutes\n\ntest:\n  stage: test\n  script: cat compiled.txt | grep -q 'Hello world'\n\npack-gz:\n  stage: package\n  script: cat compiled.txt | gzip > packaged.gz\n  needs: [\"test\"]\n  artifacts:\n    paths:\n    - packaged.gz\n\npack-iso:\n  stage: package\n  before_script:\n  - echo \"ipv6\" >> /etc/modules\n  - apk update\n  - apk add xorriso\n  script:\n  - mkisofs -o ./packaged.iso ./compiled.txt\n  needs: [\"test\"]\n  artifacts:\n    paths:\n    - packaged.iso\n```\n\nWow, it looks like we have just created a pipeline! We have three sequential stages, and the jobs `pack-gz` and `pack-iso` inside the `package` stage run in parallel:\n\n![Pipelines illustration](https://about.gitlab.com/images/blogimages/the-basics-of-gitlab-ci/pipeline.png)\n\n## Elevating your pipeline\n\nHere is how to elevate your pipeline.\n\n### Incorporating automated testing into CI pipelines\n\nIn DevOps, a key goal is shipping great apps with a great user experience. So, let's add some tests to our CI pipeline to catch bugs early in the process. This way, we fix issues before they grow and before we move on to new work.\n\nGitLab makes our lives easier by offering out-of-the-box templates for various [tests](https://docs.gitlab.com/ee/ci/testing/). 
All we need to do is include these templates in our CI configuration.\n\nIn this example, we will include [accessibility testing](https://docs.gitlab.com/ee/ci/testing/accessibility_testing.html):\n\n```yaml\nstages:\n  - accessibility\n\nvariables:\n  a11y_urls: \"https://about.gitlab.com https://www.example.com\"\n\ninclude:\n  - template: \"Verify/Accessibility.gitlab-ci.yml\"\n```\n\nCustomize the `a11y_urls` variable to list the URLs of the web pages to test with [Pa11y](https://pa11y.org/). You can add [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) scanning in the same way:\n\n```yaml\ninclude:\n  - template: Jobs/Code-Quality.gitlab-ci.yml\n```\n\nGitLab makes it easy to see the test report right in the merge request widget area. Having the code review, pipeline status, and test results in one spot makes everything smoother and more efficient.\n\n![Accessibility report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-02_at_10.56.41.png)\n\u003Ccenter>\u003Ci>Accessibility merge request widget\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n![Code quality widget in MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-02_at_11.00.25.png)\n\u003Ccenter>\u003Ci>Code quality merge request widget\u003C/i>\u003C/center>\n\n### Matrix builds\n\nIn some cases, we will need to test our app in different configurations, OS versions, programming language versions, etc. For those cases, we'll use the [parallel:matrix](https://docs.gitlab.com/ee/ci/yaml/#parallelmatrix) keyword to test our application across various combinations in parallel using one job configuration. In this blog, we'll test our code with different Python versions using the matrix keyword.\n\n```yaml\npython-req:\n  image: python:$VERSION\n  stage: lint\n  script:\n    - pip install -r requirements_dev.txt\n    - chmod +x ./build_cpp.sh\n    - ./build_cpp.sh\n  parallel:\n    matrix:\n      - VERSION: ['3.8', '3.9', '3.10', '3.11']   # https://hub.docker.com/_/python\n```\n\nDuring pipeline execution, this job will run in parallel four times, each time using a different Python image, as shown below:\n\n![Matrix job running](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-02_at_11.12.48.png)\n\n### Unit testing\n\n#### What are unit tests?\n\nUnit tests are small, targeted tests that check individual components or functions of software to ensure they work as expected. They are essential for catching bugs early in the software development process and verifying that each part of the code performs correctly in isolation.\n\nExample: Imagine you're developing a calculator app. A unit test for the addition function would check if 2 + 2 equals 4. If this test passes, it confirms that the addition function is working correctly.\n\n#### Unit testing best practices\n\nIf the tests fail, the pipeline fails and users get notified. The developer needs to check the job logs, which usually contain thousands of lines, and see where the tests failed so that they can fix them. This check is time-consuming and inefficient.\n\nYou can configure your job to use [unit test reports](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html). GitLab displays reports on the merge request and on the pipeline details page, making it easier and faster to identify the failure without having to check the entire log.
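\n\nAs a minimal sketch (the job name, test command, and report path are placeholders), a job publishes its results through `artifacts:reports:junit`:\n\n```yaml\n# Hypothetical job publishing a JUnit-format report so GitLab can\n# surface failing tests in the MR widget and pipeline details page.\nunit-tests:\n  stage: test\n  script: ./run-tests.sh --junit-output=report.xml\n  artifacts:\n    reports:\n      junit: report.xml\n```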
\n\n##### JUnit test report\n\nThis is a sample JUnit test report:\n\n![pipelines JUnit test report v13 10](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674097/Blog/Content%20Images/pipelines_junit_test_report_v13_10.png){: .shadow.center}\n\n### Integration and end-to-end testing strategies\n\nIn addition to our regular development routine, it's super important to set up a special pipeline just for integration and end-to-end testing. This checks that all the different parts of our code work together smoothly, including [microservices](https://about.gitlab.com/topics/microservices/), UI flows, and any other components.\n\nWe run these tests [nightly](https://docs.gitlab.com/ee/ci/pipelines/schedules.html). We can set it up so that the [results automatically get sent to a special Slack channel](https://docs.gitlab.com/ee/user/project/integrations/gitlab_slack_application.html#notification-events). This way, when developers come in the next day, they can quickly spot any issues. It's all about catching and fixing problems early on!\n\n### Test environment\n\nFor some of the tests, we may need a test environment to properly test our apps. With GitLab CI/CD, we can automate the deployment of testing environments and save a ton of time. Since this blog mostly focuses on CI, I won't elaborate on this, but you can refer to this section in the [GitLab documentation](https://docs.gitlab.com/ee/topics/release_your_application.html).\n\n## Implementing security scans in CI pipelines\n\nHere are the ways to implement security scans in CI pipelines.\n\n### SAST and DAST integration\n\nWe're all about keeping our code safe. If there are any vulnerabilities in our latest changes, we want to know ASAP. That's why it's a good idea to add security scans to your pipeline. They'll check the code with every commit and give you a heads-up about any risks. We've put together a product tour to walk you through adding scans, including static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) and dynamic application security testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)), to your CI pipeline.\n\n__Click__ the image below to start the tour.\n\n[![Scans product tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-14_at_13.44.42.png)](https://gitlab.navattic.com/gitlab-scans)\n\nPlus, with AI, we can dig even deeper into vulnerabilities and get suggestions on how to fix them. Check out this demo for more info.\n\n__Click__ the image below to start the tour.\n\n[![product tour explain vulnerability ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-14_at_13.50.24.png)](https://tech-marketing.gitlab.io/static-demos/pt-explain-vulnerability.html)\n\n## Recap\n\nThere's much more to cover but let's stop here for now. All examples were made intentionally trivial so that you could learn the concepts of GitLab CI without being distracted by an unfamiliar technology stack. Let's wrap up what we have learned:\n\n1. To delegate some work to GitLab CI, you should define one or more [jobs](https://docs.gitlab.com/ee/ci/jobs/) in `.gitlab-ci.yml`.\n2. Jobs should have names and it's your responsibility to come up with good ones.\n3. 
Every job contains a set of rules and instructions for GitLab CI, defined by [special keywords](#keywords).\n4. Jobs can run sequentially, in parallel, or out of order using [DAG](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/index.html).\n5. You can pass files between jobs and store them in build artifacts so that they can be downloaded from the interface.\n6. Add [tests and security scans](https://docs.gitlab.com/ee/development/integrations/secure.html) to the CI pipeline to ensure the quality and security of your app.\n\nBelow are more formal descriptions of the terms and keywords we used, as well as links to the relevant documentation.\n\n### Keyword descriptions and documentation\n\n{: #keywords}\n\n| Keyword/term       | Description |\n|---------------|--------------------|\n| [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/) | File containing all definitions of how your project should be built |\n| [script](https://docs.gitlab.com/ee/ci/yaml/#script)        | Defines a shell script to be executed |\n| [before_script](https://docs.gitlab.com/ee/ci/yaml/#before_script) | Used to define commands that should be run before (all) jobs |\n| [image](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#what-is-image) | Defines what Docker image to use |\n| [stages](https://docs.gitlab.com/ee/ci/yaml/#stages)         | Defines the stages of the pipeline (jobs default to the `test` stage) |\n| [artifacts](https://docs.gitlab.com/ee/ci/yaml/#artifacts)     | Defines a list of build artifacts |\n| [artifacts:expire_in](https://docs.gitlab.com/ee/ci/yaml/#artifactsexpire_in) | Used to delete uploaded artifacts after the specified time |\n| [needs](https://docs.gitlab.com/ee/ci/yaml/#needs) | Used to define dependencies between jobs and allows jobs to run out of order |\n| [pipelines](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/) | A pipeline is a group of builds that get executed in stages (batches) |\n\n## More on CI/CD\n\n- [GitLab’s guide to CI/CD for beginners](/blog/beginner-guide-ci-cd/)\n- [Get faster and more flexible pipelines with a Directed Acyclic Graph](/blog/directed-acyclic-graph/)\n- [Decrease build time with a custom Docker image](http://beenje.github.io/blog/posts/gitlab-ci-and-conda/)\n- [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/)\n\n## FAQ\n\n### How do you choose between running CI jobs sequentially vs. in parallel?\n\nConsiderations for choosing between running CI jobs sequentially or in parallel include job dependencies, resource availability, execution times, potential interference, test suite structure, and cost considerations. For example, if you have a build job that must finish before a deployment job can start, you would run these jobs sequentially to ensure the correct order of execution. On the other hand, tasks such as unit testing and integration testing can typically run in parallel since they are independent and don't rely on each other's completion.\n\n### What are Directed Acyclic Graphs in GitLab CI, and how do they improve pipeline flexibility?\n\nA Directed Acyclic Graph (DAG) in GitLab CI breaks the linear order of pipeline stages. It lets you set dependencies between jobs, so jobs in later stages start as soon as the jobs they depend on finish. 
This reduces overall pipeline execution time, improves efficiency, and lets some jobs complete earlier than they would in the regular stage order.\n\n### What is the importance of choosing the right Docker image for CI jobs in GitLab?\n\nGitLab utilizes Docker images to execute jobs. The default image is `ruby:3.1`. Depending on your job's requirements, it's crucial to choose the appropriate image. Note that jobs first download the specified Docker image, and if the image contains additional packages beyond what's necessary, it will increase download and execution times. Therefore, it's important to ensure that the chosen image contains only the packages essential for your job to avoid unnecessary delays in execution.\n\n## Next steps\n\nAs a next step and to further modernize your software development practice, check out the [GitLab CI/CD Catalog](https://docs.gitlab.com/ee/architecture/blueprints/ci_pipeline_components/) to learn how to standardize and reuse CI/CD components.",[832,726],"2024-04-24",{"slug":4676,"featured":6,"template":678},"basics-of-gitlab-ci-updated","content:en-us:blog:basics-of-gitlab-ci-updated.yml","Basics Of Gitlab Ci Updated","en-us/blog/basics-of-gitlab-ci-updated.yml","en-us/blog/basics-of-gitlab-ci-updated",{"_path":4682,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4683,"content":4689,"config":4695,"_id":4697,"_type":16,"title":4698,"_source":17,"_file":4699,"_stem":4700,"_extension":20},"/en-us/blog/how-to-fuzz-rust-code",{"title":4684,"description":4685,"ogTitle":4684,"ogDescription":4685,"noIndex":6,"ogImage":4686,"ogUrl":4687,"ogSiteName":692,"ogType":693,"canonicalUrls":4687,"schema":4688},"How to fuzz Rust code continuously","Learn why you should always fuzz test your Rust code, and the code you'll need to do it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681441/Blog/Hero%20Images/rust.jpg","https://about.gitlab.com/blog/how-to-fuzz-rust-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to fuzz Rust code continuously\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yevgeny Pats\"}],\n        \"datePublished\": \"2020-12-03\",\n      }",{"title":4684,"description":4685,"authors":4690,"heroImage":4686,"date":4692,"body":4693,"category":14,"tags":4694},[4691],"Yevgeny Pats","2020-12-03","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-12-17.\n{: .alert .alert-info .note}\n\n## What is fuzzing?\n\nFuzzing, also called [fuzz testing](/topics/devsecops/what-is-fuzz-testing/), is an automated software testing technique that involves providing semi-random data as input to the program under test in order to uncover bugs and crashes.\n\nIn this short tutorial, we will discuss using `cargo-fuzz` for fuzzing Rust code.\n\n## Why fuzz Rust code?\n[Rust](https://www.rust-lang.org/) is a safe language (mostly) and memory corruption issues are a thing of the past, so we don’t need to fuzz our code, right? 
Wrong!\nAny code is worth fuzzing, especially where stability, quality, and coverage are important.\nFuzzing can also uncover logical bugs and denial-of-service issues in critical components that can lead to security issues.\n\nFor a reference to the almost endless stream of bugs found with cargo-fuzz (only the documented ones), you can look at [the list of bugs found by fuzz-testing Rust codebases](https://github.com/rust-fuzz/trophy-case).\n\n## Cargo-fuzz\n\ncargo-fuzz is the current de facto standard fuzzer for Rust; essentially, it is a proxy layer over the well-tested [libFuzzer](https://llvm.org/docs/LibFuzzer.html) engine.\nThis means the algorithm and the interface are all based on libFuzzer, a widely used, coverage-guided fuzzer for C/C++; several other languages have implemented proxy layers over it as well, just like cargo-fuzz.\n\nlibFuzzer (cargo-fuzz) and coverage-guided fuzzers in general have the following algorithm:\n\n```\n// pseudo code\nInstrument program for code coverage\nfor {\n  Choose random input from corpus\n  Mutate input\n  Execute input and collect coverage\n  If new coverage/paths are hit add it to corpus (corpus - directory with test-cases)\n}\n```\n\n## Building and running the fuzzer\n\nIf you are already familiar with this part, you can skip to the \"Running cargo-fuzz from CI\" section.\n\nWe will start with [rust-fuzzing-example](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example).\n\nFor the sake of the example, we have a simple [function](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example/-/blob/master/src/lib.rs) with an off-by-one bug:\n\n```rust\npub fn parse_complex(data: &[u8]) -> bool {\n    if data.len() == 5 {\n        // Off-by-one bug: data[5] is out of bounds for a 5-byte slice\n        if data[0] == b'F' && data[1] == b'U' && data[2] == b'Z' && data[3] == b'Z' && data[4] == b'I' && data[5] == b'T' {\n            return true;\n        }\n    }\n    return false;\n}\n```\n\nOur fuzz [function](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example/-/blob/master/fuzz/fuzz_targets/fuzz_parse_complex.rs) will look like this and will be called by libFuzzer in an infinite loop with the generated data, according to the coverage-guided algorithm:\n\n```rust\n#![no_main]\n#[macro_use] extern crate libfuzzer_sys;\nextern crate example_rust;\n\nfuzz_target!(|data: &[u8]| {\n    let _ = example_rust::parse_complex(&data);\n});\n```\n\nTo run the fuzzer, we need to build an instrumented version of the code together with the fuzz function.\ncargo-fuzz does the heavy lifting for us, so this can be done with the following simple steps:\n\n```\n# cargo-fuzz is available in rust nightly\ndocker run -it rustlang/rust:nightly-stretch /bin/bash\ncargo install cargo-fuzz\n\n# Download the example repo, build, and run the fuzzer\ngit clone https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example.git example-rust\ncd example-rust\ncargo fuzz run fuzz_parse_complex\n\n## The output should look like this:\n#524288 pulse  cov: 105 ft: 99 corp: 6/26b lim: 517 exec/s: 131072 rss: 93Mb\n#1048576        pulse  cov: 105 ft: 99 corp: 6/26b lim: 1040 exec/s: 116508 rss: 229Mb\n==2208== ERROR: libFuzzer: deadly signal\n    #0 0x5588b8234961  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x83961)\n    #1 0x5588b8262dc5  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xb1dc5)\n    #2 0x5588b8284734  
(/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xd3734)\n    #3 0x5588b82845e9  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xd35e9)\n    #4 0x5588b826493a  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xb393a)\n    #5 0x7f93737e70df  (/lib/x86_64-linux-gnu/libpthread.so.0+0x110df)\n    #6 0x7f9373252ffe  (/lib/x86_64-linux-gnu/libc.so.6+0x32ffe)\n    #7 0x7f9373254429  (/lib/x86_64-linux-gnu/libc.so.6+0x34429)\n    #8 0x5588b82a4a06  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf3a06)\n    #9 0x5588b82a1b75  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf0b75)\n    #10 0x5588b824fa1b  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x9ea1b)\n    #11 0x5588b82a442b  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf342b)\n    #12 0x5588b82a3ee1  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf2ee1)\n    #13 0x5588b82a3dd5  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf2dd5)\n    #14 0x5588b82b6cd9  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x105cd9)\n    #15 0x5588b82b6c94  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x105c94)\n    #16 0x5588b824edda  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x9ddda)\n    #17 0x5588b81c45b7  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x135b7)\n    #18 0x5588b824f7e4  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x9e7e4)\n    #19 0x5588b827da53  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xcca53)\n    #20 0x5588b82a4a18  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf3a18)\n\nNOTE: libFuzzer has rudimentary signal handlers.\n      Combine libFuzzer with AddressSanitizer or similar for better crash reports.\nSUMMARY: libFuzzer: deadly signal\nMS: 2 ShuffleBytes-ChangeByte-; base unit: 89b92cdd9bcb9b861c47c0179eff7b3a9baafcde\n0x46,0x55,0x5a,0x5a,0x49,\nFUZZI\nartifact_prefix='/example-rust/fuzz/artifacts/fuzz_parse_complex/'; Test unit written to /example-rust/fuzz/artifacts/fuzz_parse_complex/crash-df779ced6b712c5fca247e465de2de474d1d23b9\nBase64: RlVaWkk=\n```\n\nThis finds the bug in a few seconds, prints the “FUZZI” string that triggers the crash, and saves it to a file.\n\n## Running cargo-fuzz from CI\n\nThe best way to integrate cargo-fuzz with GitLab CI/CD is by adding an additional stage and job to your `.gitlab-ci.yml`. 
It is straightforward and [fully documented](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#configuration).\n\n```\ninclude:\n  - template: Coverage-Fuzzing.gitlab-ci.yml\n\nmy_fuzz_target:\n  extends: .fuzz_base\n  script:\n    - apt-get update -qq && apt-get install -y -qq git make clang cmake\n    - export CC=`which clang`\n    - export CXX=`which clang++`\n    - cargo install cargo-fuzz\n    - cargo fuzz run fuzz_parse_complex -- -runs=0\n    - ./gitlab-cov-fuzz run --regression=$REGRESSION -- ./fuzz/target/x86_64-unknown-linux-gnu/release/fuzz_parse_complex\n```\n\nFor each fuzz target, you will have to create a job that extends `.fuzz_base` and does the following:\n\n- Builds the fuzz target.\n- Runs the fuzz target via the gitlab-cov-fuzz CLI.\n- For `$CI_DEFAULT_BRANCH` (which can be overridden by `$COV_FUZZING_BRANCH`), runs fully fledged fuzzing sessions. For everything else, including MRs, runs a fuzzing regression with the accumulated corpus and fixed crashes.\n\nThis will run your fuzz tests in a blocking manner inside your pipeline. You can also run longer fuzz sessions asynchronously, as described in the [docs](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#continuous-fuzzing-long-running-async-fuzzing-jobs).\n\nCheck out our [full documentation](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) and the [example repo](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example) and try adding fuzz testing to your own repos!\n\nCover image by [Zsolt Palatinus](https://unsplash.com/@sunitalap) on [Unsplash](https://unsplash.com/)\n",[703,726],{"slug":4696,"featured":6,"template":678},"how-to-fuzz-rust-code","content:en-us:blog:how-to-fuzz-rust-code.yml","How To Fuzz Rust Code","en-us/blog/how-to-fuzz-rust-code.yml","en-us/blog/how-to-fuzz-rust-code",{"_path":4702,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4703,"content":4708,"config":4713,"_id":4715,"_type":16,"title":4716,"_source":17,"_file":4717,"_stem":4718,"_extension":20},"/en-us/blog/vscode-extension-development-with-gitlab",{"title":4704,"description":4705,"ogTitle":4704,"ogDescription":4705,"noIndex":6,"ogImage":2284,"ogUrl":4706,"ogSiteName":692,"ogType":693,"canonicalUrls":4706,"schema":4707},"VS Code extension development with GitLab","As VS Code editor increases in popularity, find out how GitLab + VS Code can be used for extension development and how we develop the official GitLab VS Code extension.","https://about.gitlab.com/blog/vscode-extension-development-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"VS Code extension development with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2020-11-30\",\n      }",{"title":4704,"description":4705,"authors":4709,"heroImage":2284,"date":4710,"body":4711,"category":14,"tags":4712},[3291],"2020-11-30","\n## What is Visual Studio Code (VS Code)?\n\nMicrosoft Visual Studio Code (VS Code) is an extensible text editor. It's implemented in TypeScript and runs on Node 12 and Electron. It was [first released in 2015](https://github.com/microsoft/vscode/releases/tag/0.10.1) and has since become widely popular[^2]. 
This post explains the basics of VS Code extension development, shows how you can use GitLab for extension development, and shares how we build the official [GitLab VS Code extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\n\n## VS Code editor key features\n\nFor me, the key feature of the VS Code editor is that it created a platform for extensions. That means not just providing an API for extensions (which editors have done since the '90s [^3]) but also providing a marketplace and a seamless way of publishing and updating extensions.\n\nThere is also a fully open source version of VS Code called [VSCodium](https://vscodium.com/). This version removes some proprietary Microsoft code from the distribution and is analogous to the Google Chrome and Chromium projects.\n\n## VS Code extension\n\nA VS Code extension is a JavaScript or TypeScript app that runs in Node and has access to the [VS Code Extension API](https://code.visualstudio.com/api). The convenient thing about this architecture is that the extension is like any other Node app and has full access to the host machine and network. It can choose its own libraries for networking, manipulating the file system, and rendering web UI.\n\n## Extension API\n\nThe extension API is implemented in TypeScript; it allows users to manipulate almost every aspect of the editor. After months of using it, I find the design elegant (with the exception of testing, which seems to be an afterthought in many areas of the API).\n\nThe main features of the API are manipulating and searching the files, editing text, creating custom left panels and status bars, debuggers, custom webview tabs, (Jupyter) notebook providers, and more. The API also provides a simple way to communicate with the user via input fields and quick-pick panels, as well as showing output with info, warning, or error messages.\n\n## Extension Marketplace\n\nIf you are familiar with either the App Store or the Play Store, you'll find VS Code has an equivalent store called [Visual Studio Marketplace](https://marketplace.visualstudio.com/search?target=VSCode), and unlike its older siblings, everything[^4] is free. Both the easy browsing experience for the user and the ease of use for a developer are differentiators for VS Code.\n\nAs a developer, you set up your [Azure Cloud token](https://code.visualstudio.com/api/working-with-extensions/publishing-extension#get-a-personal-access-token) and then run `vsce publish` in your extension folder. That's it: within a few minutes, most of your users[^5] are running the latest and greatest version of your extension. This process greatly reduces the pressure on developers to get everything right before releasing, enabling faster iteration.\n\nThere is also an independent marketplace called [open-vsx](https://open-vsx.org/) used mainly by VSCodium but also by [GitPod](https://docs.gitlab.com/ee/integration/gitpod.html) and others.\n\n## Developing extensions in GitLab\n\nIf you'd like to try developing your own extension, you can fork the [`gitlab-example-extension`](https://gitlab.com/viktomas/gitlab-example-extension) project. It contains a complete setup for linting, unit and integration testing, and publishing the extension to both [Visual Studio Marketplace](https://marketplace.visualstudio.com/search?target=VSCode) and [open-vsx](https://open-vsx.org/). Thanks to GitLab being a single platform for the whole [DevOps lifecycle](/topics/devops/), you can just push your code changes to GitLab, and CI/CD takes care of everything else. As always, if you find any useful tweaks, please submit an MR because [everyone can contribute](/company/mission/#mission).
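\n\nAs a rough sketch, a publish job in `.gitlab-ci.yml` could look like the following (the job name, Node image tag, and the `VSCE_PAT` variable are assumptions for illustration; the example project may wire this up differently):\n\n```yaml\n# Hypothetical publish job; assumes the marketplace personal access\n# token is stored in a masked CI/CD variable named VSCE_PAT.\npublish:\n  stage: deploy\n  image: node:18\n  script:\n    - npm ci\n    - npx vsce publish -p \"$VSCE_PAT\"\n  rules:\n    - if: $CI_COMMIT_TAG\n```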
\n\nYou can see what the VS Code extension API offers in the [official documentation](https://code.visualstudio.com/api). You can then have a look at [extension examples](https://code.visualstudio.com/api/extension-guides/overview) and extend them to make the VS Code editor do almost anything you want.\n\n## Our extension: GitLab Workflow\n\nIn June, the [GitLab Workflow extension became officially supported by GitLab](/blog/use-gitlab-with-vscode/). Since then, we've done a lot of cleanup work and bug fixes. Recently, we released our first larger feature: [Inserting GitLab project snippets](https://about.gitlab.com/releases/2020/11/22/gitlab-13-6-released/#insert-gitlab-snippets-directly-in-vs-code).\n\nThe primary purpose of the extension is to integrate GitLab features into the editor, so users don't have to leave the editor to perform basic tasks such as reading an issue description or creating a snippet from the code. The extension plugs GitLab features into the existing VS Code Extension API to both minimise the need for custom code and make the experience as VS Code-like as possible.\n\nThere are several main areas of the VS Code Extension API that we take advantage of:\n\n### Commands\n\n[Commands](https://code.visualstudio.com/api/extension-guides/command) are a versatile concept for triggering actions. The most common way to trigger commands is to use the \u003Ckbd>Cmd\u003C/kbd>+\u003Ckbd>Shift\u003C/kbd>+\u003Ckbd>P\u003C/kbd> Command Palette. But commands can also be triggered from context menus, clicks on buttons, or even programmatically by other code in the extension. The most common example of triggering commands programmatically is to call the `vscode.open` command with a URL as a parameter. GitLab Workflow does that every time we open the GitLab web page[^6].\n\n![Command Palette](https://about.gitlab.com/images/blogimages/vscode-extension-development-with-gitlab/commands.png){: .shadow.medium.center}\nCommand Palette in GitLab Workflow\n{: .note .text-center}\n\n### Tree View\n\nVS Code uses the [Tree View](https://code.visualstudio.com/api/extension-guides/tree-view) for displaying the left panel. The panel shows the file tree for the project, changed Git files, an outline of the open file, full-text search results, and more. We use this Tree View panel to show lists of issues and merge requests.\n\n![Tree View](https://about.gitlab.com/images/blogimages/vscode-extension-development-with-gitlab/tree-view.png){: .shadow.medium.center}\nTree View in GitLab Workflow\n{: .note .text-center}\n\n### Status bar\n\nThe [status bar](https://code.visualstudio.com/api/extension-capabilities/extending-workbench#status-bar-item) is the slim panel at the bottom of the editor. Any extension can add items to it. Extensions such as Git, spell checks, linters, and formatters all add items to the status bar to provide the user with quick feedback.\n\nThe GitLab Workflow extension shows the MR, issue, and pipeline for the current branch. 
This lets you see, for example, whether your pipeline failed after the last push.\n\n![Status bar](https://about.gitlab.com/images/blogimages/vscode-extension-development-with-gitlab/status-bar.png){: .shadow.medium.center}\nStatus bar in GitLab Workflow\n{: .note .text-center}\n\nAltogether, the VS Code Extension API provides a great foundation for bringing GitLab features closer to the editor. The GitLab VS Code extension is an exciting project that **you too can contribute to**. The best place to start is the [GitLab project page](https://gitlab.com/gitlab-org/gitlab-vscode-extension).\n\n[^2]: [17th most popular project on GitHub](https://github.com/search?p=2&q=stars%3A%3E100&s=stars&type=Repositories) at the time of writing (2020-11-20)\n[^3]: GNU Emacs supported Lisp extensions in [1985](https://en.wikipedia.org/wiki/Emacs#GNU_Emacs)\n[^4]: I haven't been able to find a paid extension in the store.\n[^5]: The auto-update feature is on by default in VS Code, but it can be turned off, in which case your users are not going to auto-update.\n[^6]: [Using `vscode.open` in the GitLab Workflow](https://gitlab.com/search?utf8=%E2%9C%93&search=vscode.open&group_id=9970&project_id=5261717&scope=&search_code=true&snippets=false&repository_ref=main&nav_source=navbar)\n\n[Cover image](https://art.ljubicapetkovic.com/cc-licensed/) by [Ljubica Petkovic](https://art.ljubicapetkovic.com), licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)\n{: .note}\n",[232,726,110],{"slug":4714,"featured":6,"template":678},"vscode-extension-development-with-gitlab","content:en-us:blog:vscode-extension-development-with-gitlab.yml","Vscode Extension Development With Gitlab","en-us/blog/vscode-extension-development-with-gitlab.yml","en-us/blog/vscode-extension-development-with-gitlab",{"_path":4720,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4721,"content":4727,"config":4733,"_id":4735,"_type":16,"title":4736,"_source":17,"_file":4737,"_stem":4738,"_extension":20},"/en-us/blog/keep-git-history-clean-with-interactive-rebase",{"title":4722,"description":4723,"ogTitle":4722,"ogDescription":4723,"noIndex":6,"ogImage":4724,"ogUrl":4725,"ogSiteName":692,"ogType":693,"canonicalUrls":4725,"schema":4726},"How to keep your Git history clean with interactive rebase","Interactive rebase is one of Git’s most versatile tools. Here's how to use it to correct commit messages, fix mistakes, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662593/Blog/Hero%20Images/title-image.png","https://about.gitlab.com/blog/keep-git-history-clean-with-interactive-rebase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to keep your Git history clean with interactive rebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tobias Günther\"}],\n        \"datePublished\": \"2020-11-23\",\n      }",{"title":4722,"description":4723,"authors":4728,"heroImage":4724,"date":4730,"body":4731,"category":14,"tags":4732},[4729],"Tobias Günther","2020-11-23","\n## What is interactive rebase? \n\nInteractive [rebase](/solutions/source-code-management/), or Git rebase interactive, is sometimes called the \"Swiss Army Knife\" of Git – because it contains so many different tools, for so many different use cases! 
However, there's one main, overarching use case: _cleaning up your local commit history_.\n\nMind the word \"local\": it should only be used for cleaning up your own, local commit history, for example before integrating one of your feature branches into a team branch. In contrast, it should NOT be used on commit history that has already been pushed and shared on a remote repository. Interactive rebase is one of those tools that \"rewrite\" Git history – and you shouldn't do this on commits that have already been shared with others.\n\nWith this little warning message out of the way, let's look at some practical examples! \n\nNote: for easier visualization of the scenarios and workflows in this post, I’ve been using the [\"Tower\" Git desktop GUI](https://www.git-tower.com/?utm_source=gitlab&utm_medium=guestpost&utm_campaign=interactive-rebase) in some of my screenshots.\n{: .note}\n\n## Correcting an old commit message with Git rebase interactive\n\nSometimes you notice a typo in an **old commit message** – or you've forgotten to mention something in the description that is noteworthy. If we were talking about the _very last_ commit, we could simply use the `--amend` option of the `git commit` command. But for older commits you will have to use interactive rebase to change them after the fact.\n\nHere's an example of a commit message gone horribly wrong that we want to correct:\n\n![A bad commit message that needs correction](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/bad-commit-message@2x.png){: .shadow.medium.center}\nA bad commit message that needs correction\n{: .note.text-center}\n\nThe first step in _any_ Git interactive rebase session is to **determine what part of commit history you want to manipulate**. To take the above example again: in order to change this bad commit, we have to start the session at its _parent_ commit.\n\n![Starting our interactive rebase session](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/start-at-parent-commit@2x.png){: .shadow.medium.center}\nStarting our interactive rebase session\n{: .note.text-center}\n\nWe can now feed this starting commit's hash to the Git rebase interactive command:\n\n```\n$ git rebase -i 0023cddd\n```\n\nAn editor window will now open, containing a list of the commits that you just selected for manipulation. And don't be surprised that they are in _reverse order_: in an interactive rebase session, Git will reapply the old commits, item after item – which means that reversing the order is correct from Git's perspective.\n\n![Editor window with the selected commits](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/editor-window-start-ir@2x.png){: .shadow.medium.center}\nEditor window with the selected commits\n{: .note.text-center}\n\nOne other important thing to note about this editor window: _you don't perform the actual manipulations here_! Or, in this concrete example, you do NOT go ahead and change the commit message here. Instead, you only mark the commit you want to change with an action keyword. In our case, because we want to change a commit’s message, we mark the line with \"reword\". If you then save and close this editor window, a new one will open, containing the old commit’s message. 
Now is the time to finally make your changes:\n\n![Finally, we can make our changes](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/correct-commit-message.gif){: .shadow.medium.center}\nFinally, we can make our changes\n{: .note.text-center}\n\nAfter saving and closing once more, the interactive rebase session is complete and our old commit message has been corrected!\n\n## Combining multiple commits into one using interactive rebase\n\nAnother use case for interactive rebase is when you want to **combine multiple old commits into one**. Of course, the golden rule of version control still applies: in most situations, it's beneficial to create more and smaller commits instead of a few big ones. However, as with everything, we might find that we have overdone this and now want to meld two or more old commits into a single one.\n\nTo give a concrete example, let's say we want to combine the following selected commits into a single one:\n\n![Let's combine multiple commits into one](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/squash-selected-commits@2x.png){: .shadow.medium.center}\nLet's combine multiple commits into one\n{: .note.text-center}\n\nJust like in our first case, we begin by starting the interactive rebase session at (or before) the parent commit of the oldest one we want to manipulate.\n\n```\n$ git rebase -i 2b504bee\n```\n\nAgain, an editor window will open, listing that part of our commit history that we want to manipulate:\n\n![Marking lines with \"squash\"](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/squash-mark-commit@2x.png){: .shadow.medium.center}\nMarking lines with \"squash\"\n{: .note.text-center}\n\nThe action keyword we are going to use here is called \"squash.\" And there's only one important piece of information you need to know about squash in order to use it: _the line we mark up with the \"squash\" keyword will be combined with the line directly above_. That’s why, as you can see in my screenshot above, I’ve marked line #2 with \"squash\" in order to combine it with line #1.\n\nWe can now save and close the editor window and again watch a new window appear: we are now asked to provide a commit message for the new commit that is created when combining those two old ones.\n\n![Entering a new message for the new, squashed commit](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/squash-enter-new-message@2x.png){: .shadow.medium.center}\nEntering a new message for the new, squashed commit\n{: .note.text-center}\n\nAfter saving and closing this editor window, you will see that a new commit was created that contains the changesets of both old commits. Voilà!\n\n## Fixing a mistake with interactive rebase\n\nAnother use case for interactive rebase is when you've found a mistake in one of your earlier commits. And it doesn't matter what exactly you messed up: you could have forgotten to add a certain change, left in a file you should have deleted, or simply introduced a typo...\n\nThe natural tendency, in such a situation, is to simply create a new commit that corrects the mistake. But this will mess up our commit history: making an original commit, and then adding a \"band-aid\" commit just to fix some mistakes… that’s a messy way of working. 
Your commit history will soon become hard to understand, because it's littered with all those little \"quick fix commits\"!\n\nThis is where \"fixup,\" one of the tools that come with interactive rebase, comes in very handy. Fixup takes this \"quick fix\" commit, applies its changes to the original commit (thereby correcting it), and then gets rid of the band-aid commit:\n\n![How \"fixup\" works](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/diagram-fixup.png){: .medium.center}\nHow \"fixup\" works\n{: .note.text-center}\n\nAfter we're done, it looks as if there had never been a problem with our original commit! So let's walk through this using a practical example. \n\nThe first step is to do whatever is necessary to fix the problem: this could mean adding a new file, making changes to existing ones, deleting obsolete files... you \"just\" need to produce the changes that correct the mistake.\n\nThe next step is to commit these changes to the repository – but with a little extra: when making the commit, we are going to use the `--fixup` flag and tell Git the commit hash of our bad commit:\n\n```\n$ git add corrections.txt\n$ git commit --fixup 2b504bee\n```\n\nWhen you now take a look at the commit history, you will see that a pretty ordinary-looking commit has been created – probably not the magic and fireworks you would have expected. But if you take a closer look, you will see that something’s going on: the new commit's message has automatically been prefixed with \"fixup!\" and the commit subject of our bad commit.\n\n![The original commit and the fix commit](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/fixup_create-fix-commit@2x.png){: .shadow.medium.center}\nThe original commit and the fix commit\n{: .note.text-center}\n\nThe third step now is to start the interactive rebase session. Again, we choose the parent of our bad commit as the starting point...\n\n```\n$ git rebase -i 0023cddd --autosquash\n```\n\n... and as the second part of the secret sauce, we are using the `--autosquash` flag. This option makes sure that we don't have to do _anything_ in the editor window that is now open. Take a close look at the situation:\n\n![Our fix commit is marked \"fixup\" and sorted to the right position](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/fixup_editor@2x.png){: .shadow.medium.center}\nOur fix commit is marked \"fixup\" and sorted to the right position\n{: .note.text-center}\n\nYou will see that Git automatically did two things for us:\n1. It marked our band-aid commit as \"fixup.\"\n2. It re-ordered the lines so that our band-aid commit appears directly below our bad commit. This is because fixup works exactly like squash in that it _combines with the line above_.\n\nIn other words: there's nothing left to do for us but save and close the editor window.\n\nLet's take another look at the commit history:\n\n![A happy ending!](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/fixup_final-corrected@2x.png){: .shadow.medium.center}\nA happy ending!\n{: .note.text-center}\n\nNot only does our originally bad commit now contain the changes from our band-aid commit, but on top of that, the ugly band-aid commit has disappeared from the commit history! 
Everything is nice and clean, just as if there had never been a problem!\n\n## Discover the power of Git rebase interactive\n\nThere are lots of use cases for interactive rebase – and most of them are in the department of “fixing mistakes”. For an overview of other useful things you can do, I recommend the _free_ [\"First Aid Kit for Git\"](https://www.git-tower.com/learn/git/first-aid-kit?utm_source=gitlab&utm_medium=guestpost&utm_campaign=interactive-rebase): it’s a collection of short videos (2-3 min per episode) that help you learn to undo mistakes using interactive rebase and other Git tools.\n\nEditor's note: I had to use interactive rebase when reviewing this very post! One of my commits included an image that was greater than 1MB, which is against the rules for the GitLab website project. I had to go back and fix that commit to include a correctly sized image instead. Thanks for the lesson, universe! 😁\n{: .note}\n\n## More Git tips and tricks\n\n- [15 Git tips to improve your workflow](/blog/15-git-tips-improve-workflow/)\n- [How Git Partial Clone lets you fetch only the large file you need](/blog/partial-clone-for-massive-repositories/)\n- [Git happens! 6 Common Git mistakes and how to fix them](/blog/git-happens/)\n\n### About the guest author\n\n_[Tobias Günther](https://twitter.com/gntr) is the CEO of [Tower](https://www.git-tower.com/?utm_source=gitlab&utm_medium=guestpost&utm_campaign=interactive-rebase), the popular Git desktop client that helps more than 100,000 developers around the world to be more productive with Git._\n\nCover image by [David Taljat](https://www.pexels.com/@david-taljat-3748658) on [Pexels](https://www.pexels.com/photo/yellow-and-blue-line-on-gray-asphalt-road-5690623/)\n{: .note}\n",[702,726],{"slug":4734,"featured":6,"template":678},"keep-git-history-clean-with-interactive-rebase","content:en-us:blog:keep-git-history-clean-with-interactive-rebase.yml","Keep Git History Clean With Interactive Rebase","en-us/blog/keep-git-history-clean-with-interactive-rebase.yml","en-us/blog/keep-git-history-clean-with-interactive-rebase",{"_path":4740,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4741,"content":4747,"config":4752,"_id":4754,"_type":16,"title":4755,"_source":17,"_file":4756,"_stem":4757,"_extension":20},"/en-us/blog/docker-hub-rate-limit-monitoring",{"title":4742,"description":4743,"ogTitle":4742,"ogDescription":4743,"noIndex":6,"ogImage":4744,"ogUrl":4745,"ogSiteName":692,"ogType":693,"canonicalUrls":4745,"schema":4746},"How to make Docker Hub rate limit monitoring a breeze","Docker Hub Rate Limits are enforced and we need to find ways to monitor the remaining pull requests. 
Explore some ways to create a monitoring plugin for Nagios/Icinga/Sensu/Zabbix and test-drive a new Prometheus exporter in combination with Grafana.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681749/Blog/Hero%20Images/vidarnm-unsplash.jpg","https://about.gitlab.com/blog/docker-hub-rate-limit-monitoring","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make Docker Hub rate limit monitoring a breeze\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2020-11-18\",\n      }",{"title":4742,"description":4743,"authors":4748,"heroImage":4744,"date":4749,"body":4750,"category":14,"tags":4751},[1504],"2020-11-18","\n\nWhen we learned about the [Docker Hub Rate Limit](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/), we thought about ways to mitigate and analyze the new situation. Container images are widely used and adopted for sandbox environments in [CI/CD pipelines](/solutions/continuous-integration/) and cloud-native production environments with app deployment in [Kubernetes clusters](/solutions/kubernetes/).\n\n## What is meant by Docker Hub limits?\n\nEach `docker pull` request toward the central `hub.docker.com` container registry is counted. When a defined limit is reached, future requests are blocked and might be delayed into the next free window. [CI/CD](/topics/ci-cd/) jobs cannot be executed anymore after receiving an HTTP error `429 - too many requests`, and similar errors will be seen in production deployment logs for Kubernetes.\n\nDocker defines this limit as 100 anonymous requests every six hours for the client's source IP address. If you have multiple container deployments behind a single IP address, for example a company DMZ using NAT, this limit can be reached very fast. A similar problem happens with watchtower tools, which try to keep your container images updated, for example on your self-managed GitLab Runner. The limit can be raised by logging in, and raised further by getting a paid subscription.\n\nThe question is: Where can you see the current limit and the remaining pull requests?\n\n### How to check the Docker Hub request limit?\n\nThe [Docker documentation](https://docs.docker.com/docker-hub/download-rate-limit/#how-can-i-check-my-current-rate) suggests using CLI commands that invoke `curl` HTTP requests against the Docker Hub registry and parse the JSON response with [jq](https://stedolan.github.io/jq/).\n\nDefine the `IMAGE` variable once for the following CLI commands to use:\n\n```shell\n$ IMAGE=\"ratelimitpreview/test\"\n```\n\nObtain a token for authorization. Optionally print the variable value to verify its content.\n\n```shell\n$ TOKEN=$(curl \"https://auth.docker.io/token?service=registry.docker.io&scope=repository:$IMAGE:pull\" | jq -r .token)\n\n$ echo $TOKEN\n```\n\nThe next step is to simulate a `docker pull` request. Instead of using `GET` as the HTTP request method, a `HEAD` request is sent, which does not count toward the rate limit. The response headers contain the keys `RateLimit-Limit` and `RateLimit-Remaining`.\n\n```shell\n$ curl --head -H \"Authorization: Bearer $TOKEN\" https://registry-1.docker.io/v2/$IMAGE/manifests/latest\n```\n\nThe limit in the example is `2500` with remaining `2495` pull requests. 
`21600` defines the limit time window as six hours.\n\n```\nRateLimit-Limit: 2500;w=21600\nRateLimit-Remaining: 2495;w=21600\n```\n\n`RateLimit-Reset` can be returned too; this is the remaining time until the limits are reset.\n\n### Create a monitoring script\n\nThe CLI commands can be translated into a programming language of your choice that provides methods for HTTP requests and better response parsing. The algorithm needs to follow these steps:\n\n* Obtain an authorization token from Docker Hub. Username/password credentials can be optionally provided, otherwise the request happens anonymously.\n* Send a `HEAD` request to the Docker Hub registry to simulate a `docker pull` request\n* Parse the response headers and extract the values for `RateLimit-Limit` and `RateLimit-Remaining`\n* Print a summary of the received values
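\n\nAs a language-agnostic illustration of these four steps, here is a compact TypeScript sketch (assuming Node.js 18+ for its built-in `fetch`; the plugin introduced below implements the full flow, plus the monitoring requirements, in Python):\n\n```typescript\n// Sketch: check the Docker Hub rate limit, mirroring the curl commands above.\n// Error handling and thresholds are left out on purpose.\nconst IMAGE = 'ratelimitpreview/test';\n\nasync function checkDockerHubLimit() {\n  // Step 1: obtain an (anonymous) authorization token.\n  const tokenResponse = await fetch(\n    `https://auth.docker.io/token?service=registry.docker.io&scope=repository:${IMAGE}:pull`\n  );\n  const { token } = await tokenResponse.json();\n\n  // Step 2: simulate a pull with a HEAD request (it does not count toward the limit).\n  const response = await fetch(`https://registry-1.docker.io/v2/${IMAGE}/manifests/latest`, {\n    method: 'HEAD',\n    headers: { Authorization: `Bearer ${token}` },\n  });\n\n  // Step 3: parse values like '2500;w=21600' from the response headers.\n  const parseHeader = (name: string) =>\n    Number((response.headers.get(name) ?? '').split(';')[0]);\n  const limit = parseHeader('ratelimit-limit');\n  const remaining = parseHeader('ratelimit-remaining');\n\n  // Step 4: print a summary of the received values.\n  console.log(`Docker Hub: limit ${limit}, remaining ${remaining}`);\n}\n\ncheckDockerHubLimit();\n```\n\n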
A plugin script which can be used by Nagios/Icinga/Sensu/Zabbix and others has additional requirements. It needs to implement the [Monitoring Plugins API specification](https://www.monitoring-plugins.org/doc/guidelines.html):\n\n* Print the limit and remaining count\n* Calculate a state: Ok, Warning, Critical, Unknown and print a helpful text on the shell\n* Add optional warning/critical thresholds for the remaining count. Whenever the count is lower than the threshold, the state changes to Warning/Critical and the exit code changes: `OK=0, Warning=1, Critical=2, Unknown=3`\n* Collect limit values as performance metrics for graphing and visualization\n* Add verbose mode and timeout parameters as plugin development best practices. If Docker Hub does not respond within 10 seconds as default, the plugin exits and returns `Unknown` as state.\n\nYou can download the [check_docker_hub_limit.py plugin script](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit) and integrate it into your monitoring environment.\n\n#### Use the monitoring plugin script\n\nThe [check_docker_hub_limit.py](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit) plugin is written in Python 3 and requires the `requests` library. Follow the [installation instructions](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit#installation) and run the plugin script with the `--help` parameter to see all available options:\n\n```\n$ python check_docker_hub_limit.py --help\n\nusage: check_docker_hub_limit.py [-h] [-w WARNING] [-c CRITICAL] [-v] [-t TIMEOUT]\n\nVersion: 2.0.0\n\noptional arguments:\n  -h, --help            show this help message and exit\n  -w WARNING, --warning WARNING\n                        warning threshold for remaining\n  -c CRITICAL, --critical CRITICAL\n                        critical threshold for remaining\n  -v, --verbose         increase output verbosity\n  -t TIMEOUT, --timeout TIMEOUT\n                        Timeout in seconds (default 10s)\n```\n\nRun the script to fetch the current remaining count. The plugin script exits with code `0`, meaning OK.\n\n```\n$ python3 check_docker_hub_limit.py\nOK - Docker Hub: Limit is 5000 remaining 4997|'limit'=5000 'remaining'=4997\n\n$ echo $?\n0\n```\n\nSpecify a warning threshold of `10000` pulls, and a critical threshold of `3000`.\nThe example shows how the state changes to `WARNING` with a current count of `4999` remaining\npull requests. The plugin script exit code changes to `1`.\n\n```\n$ python3 check_docker_hub_limit.py -w 10000 -c 3000\nWARNING - Docker Hub: Limit is 5000 remaining 4999|'limit'=5000 'remaining'=4999\n\n$ echo $?\n1\n```\n\nSpecify a higher critical threshold of `5000`. When the remaining count goes below this value,\nthe plugin script returns `CRITICAL` and changes the exit state to `2`.\n\n```\n$ python3 check_docker_hub_limit.py -w 10000 -c 5000\nCRITICAL - Docker Hub: Limit is 5000 remaining 4998|'limit'=5000 'remaining'=4998\n\n$ echo $?\n2\n```\n\nWhen a timeout is reached, or another error is thrown, the exit state switches to `3` and the output state becomes `UNKNOWN`.\n\n### Use a Prometheus exporter for rate limit metrics\n\n[Prometheus](https://prometheus.io/) scrapes metrics from HTTP endpoints. There is a variety of exporters for Prometheus to monitor host systems, HTTP endpoints, containers, databases, etc. Prometheus provides [client libraries](https://prometheus.io/docs/instrumenting/clientlibs/) to make it easier to start writing your own custom exporter. The metrics need to be exported in a [defined format](https://prometheus.io/docs/instrumenting/exposition_formats/).\n\nThe Docker Hub limit values can be fetched by obtaining an authorization token first and then sending a `HEAD` request as shown above. The algorithm follows the ideas of the monitoring plugin. Instead of printing the values onto the shell, the metric values are exposed via an HTTP server. The Prometheus client libraries provide this functionality built-in.\n\nWe have created a [Prometheus Exporter for Docker Hub Rate Limits](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/docker-hub-limit-exporter) using the [Python client library](https://github.com/prometheus/client_python). The repository provides a demo environment with `docker-compose` which starts the exporter, Prometheus, and Grafana.\n\nEnsure that [docker-compose is installed](https://docs.docker.com/compose/install/) and clone/download the repository. Then run the following commands:\n\n```\n$ cd example/docker-compose\n\n$ docker-compose up -d\n```\n\nNavigate to `http://localhost:3030` to access Grafana and explore the demo environment with the pre-built dashboard.\n\n![Grafana dashboard for Docker Hub Limit Prometheus Exporter](https://about.gitlab.com/images/blogimages/docker-hub-limit-monitoring/grafana_prometheus_docker_hub_limit_exporter_demo.png){: .shadow.medium.center}\n\nGrafana dashboard for Docker Hub Limits\n{: .note.text-center}\n\n### More monitoring/observability ideas\n\nUse the steps explained in this blog post to add Docker Hub limit monitoring. Evaluate the Prometheus exporter or the check plugin, or create your own monitoring scripts. Fork the repositories and send an MR our way!\n\n* [check-docker-hub-limit for Nagios/Icinga/Zabbix/Sensu](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit)\n* [docker-hub-limit-exporter for Prometheus](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/docker-hub-limit-exporter)\n\nThe Prometheus exporter and the monitoring plugin script can help to see trends and calculate usage over time. Use your own local (GitLab) container registry or one of the available caching methods described in these blog posts:\n\n* [Cache Docker images in your CI/CD infrastructure](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/). 
Use this resource for possible solutions around caching and proxying.\n* [Use the Dependency Proxy](/blog/minor-breaking-change-dependency-proxy/). Learn more about the GitLab Dependency Proxy being made open source in the future.\n* [#everyonecancontribute cafe: Docker Hub Rate Limit: Mitigation, Caching and Monitoring](https://everyonecancontribute.com/post/2020-11-04-cafe-7-docker-hub-rate-limit-monitoring/). This is a community meetup hosted by Developer Evangelists at GitLab. The blog post includes a video with more insights and discussion.\n\nPhoto by [Vidar Nordli-Mathisen](https://unsplash.com/@vidarnm) from [Unsplash](https://www.unsplash.com).\n{: .note}\n",[873,894,1002,703,1286],{"slug":4753,"featured":6,"template":678},"docker-hub-rate-limit-monitoring","content:en-us:blog:docker-hub-rate-limit-monitoring.yml","Docker Hub Rate Limit Monitoring","en-us/blog/docker-hub-rate-limit-monitoring.yml","en-us/blog/docker-hub-rate-limit-monitoring",{"_path":4759,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4760,"content":4765,"config":4770,"_id":4772,"_type":16,"title":4773,"_source":17,"_file":4774,"_stem":4775,"_extension":20},"/en-us/blog/gitlab-for-agile-portfolio-planning-project-management",{"title":4761,"description":4762,"ogTitle":4761,"ogDescription":4762,"noIndex":6,"ogImage":4022,"ogUrl":4763,"ogSiteName":692,"ogType":693,"canonicalUrls":4763,"schema":4764},"How to use GitLab for Agile portfolio planning and project management","GitLab provides features that are flexible enough to be used for scaled Agile portfolio planning and project management, regardless of the framework you choose.","https://about.gitlab.com/blog/gitlab-for-agile-portfolio-planning-project-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab for Agile portfolio planning and project management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Hernandez\"},{\"@type\":\"Person\",\"name\":\"Julie Byrne\"}],\n        \"datePublished\": \"2020-11-11\",\n      }",{"title":4761,"description":4762,"authors":4766,"heroImage":4022,"date":4767,"body":4768,"category":14,"tags":4769},[1896,1897],"2020-11-11","\nMany organizations using GitLab want to understand how to best apply the various features to support [Agile project and portfolio management](/solutions/agile-delivery/) processes (PPM) at scale. These organizations use different Agile frameworks. In a previous blog post, we outlined [an approach for using GitLab for Agile software development](/blog/gitlab-for-agile-software-development/). Since the original post, we've continued to enhance functionality for lean/Agile portfolio planning and Agile project management. In this blog post, we’re updating recommendations for using Agile based on these enhancements and we describe how these features can be utilized for a variety of different scaling frameworks.\n\n## Agile software development at scale\n\nFirst, let’s take a look at a typical scaling model of [Agile software development](/topics/agile-delivery/) beyond the individual team level. 
Whether you’ve adopted a specific scaling framework such as the [Scaled Agile Framework (SAFe)](https://www.scaledagileframework.com/), [Disciplined Agile (DA)](https://www.pmi.org/disciplined-agile), [Large Scale Scrum (LeSS)](https://less.works/), or [Spotify](https://medium.com/scaled-agile-framework/exploring-key-elements-of-spotifys-agile-scaling-model-471d2a23d7ea), most scaling models have similarities in their approach, organizing Agile teams into teams of teams, and even into teams of teams of teams.\n\n![](https://about.gitlab.com/images/blogimages/team-teams2.png){: .medium.center}\n\nTypically, scaling frameworks use these types of labels to describe each level:\n\n| **Level** | **Common Names** | **Description** |\n| ----- | ----- | ----- |\n| Team | Scrum team, Kanban team, Squad | A cross-functional group (including BA, Dev, Test, and other supporting roles) implementing stories and bug fixes for an application or set of applications |\n| Team of Teams | Program, Release Train, Tribe | A set of teams who plan together and coordinate efforts to implement features for a system involving one or more applications |\n| Team of Teams of Teams | Portfolio, Business Unit, Alliance | One or more programs with a shared set of strategic goals and themes, typically funded with a single budget |\n\nNow that we've reviewed the different levels of Agile at scale, let’s next think about what types of data and visibility are required for agility at each level.\n\nThe scrum master/project manager/tribe lead, product owner, and team members are part of the Team level that is focused on short-term planning, typically weekly to monthly. They will want:\n\n- A board view to show flow of work\n- Current and upcoming iteration plan\n- A task list for each work item\n- Visibility into team progress\n- Team predictability\n\nThe program manager/release train engineer, product manager/product area lead, and design lead guide the Team of Teams, with a focus on mid-range planning, monthly to quarterly (or potentially a bit longer). They will want visibility into:\n\n- A prioritized feature list with anticipated business value captured\n- Feature roadmap\n- View of mid-range plan\n- Epic health\n- Progress against plan\n- Program predictability\n\nFinally, portfolio managers, business leaders, and chief architects perform strategic long-term planning, typically quarterly to annually or longer, at the Team of Teams of Teams level. They will want to see:\n\n- A list of long-term epics/initiatives/business projects, categorized by theme and/or strategic goals\n- The long-term strategic roadmap\n\n## How can we best support these needs using GitLab?\n\nFirst, we need to understand what GitLab object types to use to support the appropriate visibility at each level.\n\n| **GitLab Structure** | **Team** | **Team of Teams** | **Team of Teams of Teams** |\n| ----- | ----- | ----- | ----- |\n| Org structure | Project or sub-sub-group | Sub-group | Top level group |\n| Work items | Issue | Child epic | Parent epic |\n| Time boxes | Iteration | Milestone | Roadmap across milestones |\n\nIn GitLab, epics can be defined in a hierarchy to break down long-term epics into a set of shorter-term epics that can each be delivered by a single Team-of-Teams. While we will use a single parent-child epic hierarchy in this blog to keep things simple, you can use more levels of nesting. 
The lowest level of epic in the hierarchy would be linked to a set of issues to define the work each team will do in order to implement that epic. GitLab is very flexible and does not enforce a hierarchy. For example, in cases where an epic should be tracked at the portfolio level but decomposed directly into issues, with no features in between, GitLab will allow you to do that linking directly without having to create dummy features in the middle.\n\n![](https://about.gitlab.com/images/blogimages/epic_hierarchy2.png){: .medium.center}\n\nWe recommend using [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels) to define epic types, e.g., you might define long-term epics to be portfolio epics, and decompose them into shorter-term features. Using _epic::portfolio-epic_ and _epic::feature_ will allow you to appropriately categorize and filter a list of epics and make sure that each epic exists in the appropriate location.\n\nA [group](https://docs.gitlab.com/ee/user/group/) can be used to organize projects. Groups can be nested, e.g., a parent group can contain multiple child groups, and each child group can have its own subgroups, etc. A GitLab [project](https://docs.gitlab.com/ee/user/project/) contains a single source code repository, issue tracker, and associated tools and functionality in order to collaborate on software development for that repository.\n\n![](https://about.gitlab.com/images/blogimages/group_project2.png){: .medium.center}\n\nNote: Group permissions are propagated down the tree from the top-level, so, e.g., a maintainer in the top-level group will have maintainer permissions in the entire group hierarchy.\n\nWe recommend that you use a nested group hierarchy to define your scaled organizational structure for Team of Teams of Teams, Team of Teams, and Teams. For example, consider an electronic banking program that is part of the digital services portfolio for a financial services provider. The electronic banking program might have separate teams that work on web, mobile, backend, and middleware. You would use a parent group for the digital services portfolio, a sub-group for the electronic banking program, and a separate project within the sub-group for each team.\n\n![](https://about.gitlab.com/images/blogimages/group_project_example.png){: .medium.center}\n\nGenerally speaking, parent epics would be defined within the top-level group since they define work that can span the sub-groups. Each parent epic would be broken down into multiple child epics, each of which is defined within the appropriate child group (representing a Team-of-Teams).\n\nThe example above is simple in that each Agile team is working on a single repository. But what if that’s not the case?\n\n- If a single team works exclusively on multiple repositories (but no other team works on them), then create a sub-group for the team, and include each repo as a project.\n- If multiple teams work on a collection of repositories, use the Team of Teams group for collaboration across all Teams in all projects, and use individual scoped labels for each team to track their issues on filtered boards.\n\nGitLab provides an [issue tracker](https://docs.gitlab.com/ee/user/project/issues/) for any type of issue you want to manage and track. Typically, for Agile software development teams, these would be things like user stories and defects. 
We recommend that you use [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels) to define the different issue types, for ease of filtering and reporting. The great news is that you can have as many or as few issue types as you see fit. GitLab does not provide the ability to define a custom schema for each issue type as that tends to complicate both administration and usage of issues and gets in the way of software development. Instead, use [custom issue templates](https://docs.gitlab.com/ee/user/project/description_templates.html#creating-issue-templates) to provide guidance to the end user on what types of information should be captured for each issue type, and even to set labels automatically on the issue as it is created.\n\nGitLab makes project status reporting easy with the issue [health status](https://docs.gitlab.com/ee/user/project/issues/#health-status). Each issue can have a status of `On Track`, `Needs Attention`, or `At Risk`. The health statuses of all issues for an epic are reported within the epic details for a quick snapshot of the health of the overall epic.\n\nFinally, we have to define timeboxes to use for our planning cadences. We tend to use [milestones](https://docs.gitlab.com/ee/user/project/milestones/) for our mid-range planning, i.e., a quarterly development plan. Define the milestone at the highest group level that will be using that cadence, e.g., if the entire portfolio plans on a quarterly basis, then the planning milestone should be defined at the top-level group level. If each team of teams plans on a different mid-range cadence, then you would want to define separate milestones at each child group level. Note that milestones get added directly to issues, so the projects that will use the milestones must be within the group hierarchy where the milestone is defined. One other consideration is that an issue can only have a single milestone associated with it, so it’s a good idea to align on the best use of milestones across the Team of Teams before starting to use them.\n\nWe recently released our [iterations MVC](https://gitlab.com/groups/gitlab-org/-/epics/4012) in GitLab! This allows you to define, at the group or individual project level, short-term cadences that a team or set of teams uses for planning and tracking their work. While, as an MVC, iteration functionality is not yet as robust as milestones, we do have plans for enhancements including using iterations on boards, filtering issue lists by iteration, and burnup/burndown charts. You can view the epic [Iterations in GitLab](https://gitlab.com/groups/gitlab-org/-/epics/2422) to learn more about planned enhancements. And that doesn’t mean Kanban teams are out of luck. 
We innately support Kanban in GitLab, too, with issue boards, so you can have a mix of iteration based teams and continuous flow teams working together.\n\n## Agile PPM: putting it all together\n\nHere’s how the GitLab features come together to support Agile at scale to allow planning from the highest level down to the individual team, and to provide visibility, traceability, and reporting at each level:\n\n![](https://about.gitlab.com/images/blogimages/epic_hierarchy.png){: .medium.center}\n\nYou can also check out the video below to see how the structure comes together in GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/5J0bonGoECs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Read more about Agile at GitLab\n\n- [See more information about our Agile delivery solution](/solutions/agile-delivery/)\n- [Build your Agile roadmap in GitLab](https://docs.gitlab.com/ee/user/group/roadmap/)\n- [Learn how to create iterations](https://docs.gitlab.com/ee/user/group/iterations/)\n\nCover image by [Martin Sanchez](https://unsplash.com/@martinsanchez?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/MD6E2Sv__iA)\n{: .note}\n",[1646,749,727,1347],{"slug":4771,"featured":6,"template":678},"gitlab-for-agile-portfolio-planning-project-management","content:en-us:blog:gitlab-for-agile-portfolio-planning-project-management.yml","Gitlab For Agile Portfolio Planning Project Management","en-us/blog/gitlab-for-agile-portfolio-planning-project-management.yml","en-us/blog/gitlab-for-agile-portfolio-planning-project-management",{"_path":4777,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4778,"content":4784,"config":4790,"_id":4792,"_type":16,"title":4793,"_source":17,"_file":4794,"_stem":4795,"_extension":20},"/en-us/blog/lessons-in-iteration-from-new-infrastructure-team",{"title":4779,"description":4780,"ogTitle":4779,"ogDescription":4780,"noIndex":6,"ogImage":4781,"ogUrl":4782,"ogSiteName":692,"ogType":693,"canonicalUrls":4782,"schema":4783},"Lessons in iteration from a new team in infrastructure","A new, small team at GitLab discovered that minimum viable change applies to scaling problems too.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681724/Blog/Hero%20Images/skateboard-iteration.jpg","https://about.gitlab.com/blog/lessons-in-iteration-from-new-infrastructure-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Lessons in iteration from a new team in infrastructure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean McGivern\"}],\n        \"datePublished\": \"2020-11-09\",\n      }",{"title":4779,"description":4780,"authors":4785,"heroImage":4781,"date":4787,"body":4788,"category":14,"tags":4789},[4786],"Sean McGivern","2020-11-09","\n\nThe [Scalability Team][scalability] has the goal of understanding\npotential scaling bottlenecks in our application. We formed a year ago\nwith one person, and as of early 2020, we are made up of three backend\nengineers, plus one site reliability engineer. We are a\nsort of [program team] so we have a wide remit, and there's only one\nsimilar team at GitLab: our sibling [Delivery Team][delivery]. 
All of\nthe backend engineers in the team (including me) came from\nworking on product development rather than infrastructure work.\n\n[scalability]: /handbook/engineering/infrastructure/team/scalability/\n[program team]: https://lethain.com/programs-owning-the-unownable/\n[delivery]: /handbook/engineering/infrastructure/team/delivery/\n\nWe recently finished a project where we [investigated our use of\nSidekiq][sidekiq] and made various improvements. We decided to continue\nthe same approach of looking at services, and got started with our next\ntarget of Redis. Here are some lessons we took away:\n\n[sidekiq]: /blog/scaling-our-use-of-sidekiq/\n\n## 1. Don't lose sight of what matters most: impact\n\nWe chose to split our work on Redis into three phases:\n\n1. [Visibility][v]: increase visibility into the service.\n2. [Triage][t]: use our increased visibility to look for problems and\n   potential improvements, and triage those.\n3. [Knowledge sharing][ks]: share what we learned with the rest of the\n   Engineering department.\n\n[v]: https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/309\n[t]: https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/309\n[ks]: https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/265\n\n[Iteration] is crucial at GitLab, so much so that we have regular\n[Iteration Office Hours]. On the surface, you could say that we were\niterating here: our issues were small and well-scoped and we were\ndelivering code to production regularly.\n\n[Iteration]: https://handbook.gitlab.com/handbook/values/#iteration\n[Iteration Office Hours]: /handbook/ceo/#iteration-office-hours\n\nThe problem, as it turned out, was that we were focused so heavily on\nunderstanding the service that we lost track of the [results] we were\ntrying to deliver. Our [values hierarchy] puts results at the top, but\nwe hadn't given the results enough attention. We are a small team that\nneeds to cover a wide area, and we need to deliver _impactful_ changes.\n\n[results]: https://handbook.gitlab.com/handbook/values/#results\n[values hierarchy]: https://handbook.gitlab.com/handbook/values/#hierarchy\n\nThere are some [examples in our handbook][impact] – which we've added as\na result of this project – but we define impact as having a\ndirect effect on the platform, our infrastructure, or our development\nteams. That was what was missing here, because the impact was loaded\ntowards the very end of the project: largely in the knowledge sharing\nsection.\n\n[impact]: /handbook/engineering/infrastructure/team/scalability/#impact\n\nWe spent a long time (several months) improving our visibility, which\ndefinitely has a positive impact on our SREs who spend time\ninvestigating incidents. But we could have delivered this value and more\nin a shorter time period if we had kept clear sight of the impact we\nwanted to have.\n\n## 2. Minimum viable change applies to scaling problems too\n\nWith that framing in mind, it's quite clear that we weren't iterating in\nthe best way. To use a famous example, it's like we'd started building a\ncar by building the wheels, then the chassis, etc. That takes a long\ntime to get something useful. We could have started by [building a\nskateboard]. 
We didn't have a good sense of what a [minimum viable change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc)\nwas for our team, so we got it wrong.\n\n[building a skateboard]: https://blog.crisp.se/2016/01/25/henrikkniberg/making-sense-of-mvp\n\n![Building a skateboard iteration](https://about.gitlab.com/images/blogimages/scalability-redis-efficiency-skateboard.png){: .medium.center}\nIllustration by [Henrik Kniberg](https://blog.crisp.se/2016/01/25/henrikkniberg/making-sense-of-mvp)\n{: .note.text-right}\n\nWhat would a minimum viable change look like? When we worked on this project, we\ncovered several topics: adding Redis calls to our standard structured\nlogs, exposing slow log information, and so on. With hindsight, the best\nway would probably be to slice the project differently. We could take\nthe three steps above (visibility, triage, knowledge sharing), but\nconsider them all to be necessary for a project on a single topic with a\ntangible goal.\n\nWe did this, with all the impact at the end:\n\n![Working through the first step for all topics, the second step for all topics, and finally having impact in the third step](https://about.gitlab.com/images/blogimages/scalability-redis-efficiency-before.jpg)\n\nBut traveling in the other direction would have been much more\neffective:\n\n![Working through all steps for the first topic, having impact, then starting again at the second topic](https://about.gitlab.com/images/blogimages/scalability-redis-efficiency-after.jpg)\n\nThis leads to a state where:\n\n1. The impact we make is clearer.\n2. We start making an impact sooner.\n3. We can re-assess after every project, and stop early once we have\n   done enough.\n\nThe sooner we have this impact, the sooner we can see the results of\nwhat we've done. It's also good for morale to see these results on a\nregular basis!\n\n## 3. Shape your projects to deliver impact throughout\n\nThe way we originally structured our work to improve Redis usage made our\nimpact harder to see than it should have been. For example, we [updated our\ndevelopment documentation][dev-docs-update] at the end of the project.\nThis was useful, but it would have been much more useful to backend\nengineers if we'd updated the documentation along the way, so they always had the best information we could give them.\n\n[dev-docs-update]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/41889\n\nFor a more positive example: in the visibility stage, we created\na couple of issues directly for stage groups to address, rather than\nwaiting for the triage or knowledge sharing stage to do so. One of those\nissues was about [large cache entries for merge request\ndiscussions][mr-cache]. 
By getting this in front of the relevant\ndevelopment team earlier, we were able to\nget the fix scheduled and completed sooner as well.\n\n[mr-cache]: https://gitlab.com/gitlab-org/gitlab/-/issues/225600\n\nRegularly delivering projects with clear impact means that we get\nfeedback earlier (from engineers in Development and Infrastructure, or\nfrom the infrastructure itself), we can cover a wider area in less time,\nand we are happier about the work we're doing.\n\nAs people who went from working directly on user-facing features to\nworking on a property of the system as a whole, we learned that we can\nstill set ourselves an MVC to keep us on the right path, as long as we\nthink carefully about the results we want to achieve.\n\n[Cover image](https://unsplash.com/@viniciusamano?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) by shawn henry on [Unsplash](https://unsplash.com/s/photos/skateboard?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText)\n{: .note}\n",[915,704,727],{"slug":4791,"featured":6,"template":678},"lessons-in-iteration-from-new-infrastructure-team","content:en-us:blog:lessons-in-iteration-from-new-infrastructure-team.yml","Lessons In Iteration From New Infrastructure Team","en-us/blog/lessons-in-iteration-from-new-infrastructure-team.yml","en-us/blog/lessons-in-iteration-from-new-infrastructure-team",{"_path":4797,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4798,"content":4804,"config":4810,"_id":4812,"_type":16,"title":4813,"_source":17,"_file":4814,"_stem":4815,"_extension":20},"/en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab",{"title":4799,"description":4800,"ogTitle":4799,"ogDescription":4800,"noIndex":6,"ogImage":4801,"ogUrl":4802,"ogSiteName":692,"ogType":693,"canonicalUrls":4802,"schema":4803},"How we optimized infrastructure spend at GitLab","We keep our cloud spend under control with a spend optimization framework – now we're sharing it with you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681701/Blog/Hero%20Images/piggy_bank.jpg","https://about.gitlab.com/blog/how-we-optimized-our-infrastructure-spend-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we optimized infrastructure spend at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Davis Townsend\"}],\n        \"datePublished\": \"2020-10-27\",\n      }",{"title":4799,"description":4800,"authors":4805,"heroImage":4801,"date":4807,"body":4808,"category":14,"tags":4809},[4806],"Davis Townsend","2020-10-27","\n\nInfrastructure spend optimization is a hot topic these days as many established companies are migrating workloads to the cloud. Similarly,  fast-growing startups are struggling to control their operating costs as they expand their cloud footprint to meet user demand. \n\nAt GitLab we have taken a methodical and data-driven approach to the problem so we can reduce our cloud spend and control our operating costs, while still creating great features for our customers. We designed a five-stage framework which emphasizes building awareness of our infrastructure spend to the point where any change in costs is well understood and no longer a surprise.\n\nOur framework is very similar to a normal data maturity framework (shown below) that would progress through descriptive, predictive, and finally prescriptive analytics, but we tailor it specifically for this domain. 
I'll explain each stage and what it looks like at GitLab so you can see how you might apply it to your own organization.\n\n![Normal Data Maturity Framework](https://about.gitlab.com/images/blogimages/2020-10-28-How-We-Optimized-Infra-spend/DMM.jpeg \"Normal Data Maturity Framework\"){: .medium.center}\nA normal data maturity framework \n{: .note.text-center}\n\n## Spend optimization framework\n\n## 1. Basic cost visibility\nThis stage can be thought of as data exploration. You just want to understand as much as you can about where you are spending money at a high level. What vendors and services are you spending the most money on? This data is generally provided by cloud vendors through a billing console, as well as through billing exports. I've found the best way to use both options is to use the provided billing console for quickly answering simple questions about specific costs, and the exports for integrating this data into your own analytics architecture for more granular reporting, [multicloud](/topics/multicloud/) reporting, or for specific recurring reports you need over a longer time horizon.\n\n### GitLab example\nWhen starting out, we looked at Google Cloud Platform (GCP) and their [Default Billing Export](https://cloud.google.com/billing/docs/how-to/export-data-bigquery) to get an overview of which products/projects/SKUs were responsible for the majority of our spend.\n\n## 2. Cost allocation\nThis stage is all about going from high-level areas of spend to more granular dimensions that tie back to relevant business metrics in your company. At GitLab we may want to look at what we spend on particular services like CI runners, or what we spend to support employees using GitLab.com as part of their job vs. customer spend. This data may not be readily available to you so there could be a lot of work involved to tie these sorts of relevant business dimensions back to the cost reports provided by your vendor.\n\n### GitLab example\nFor our production architecture we had some [GCP labels](https://cloud.google.com/compute/docs/labeling-resources) that indicated the internal service applied to the majority of our instances, so we started with those to see which services we spent most of our money on. More recently, we have created a [handbook page for Infrastructure Standards](/handbook/infrastructure-standards/) around project creation and label naming so that we can get even more insight out of our bill.\n\n## 3. Optimize usage efficiency\nOnce you can allocate costs to their relevant business metrics, you can then start to ask interesting questions such as, “Why is our storage spend so high on feature x?” By asking these questions and then talking with the subject matter experts about these potential areas of optimization, you can start to come up with ideas to reduce some of this cost.\n\n### GitLab example\nWhen we reached this stage we began to identify many areas of opportunity, including:\n\n- [CI runners](https://gitlab.com/gitlab-org/gitlab/-/issues/35777): One of the areas discovered from stage 2 happened to be our CI runners, for which we created more granular reporting to see the cost by specific repos, pipelines, and jobs, which allowed us to find some ways to optimize our own internal use of CI.\n- [Object storage](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10087): We discovered high storage costs for outdated Postgres backups. 
We resolved this by enabling bucket lifecycle policies and reduced our object storage for that bucket by 900TB.\n- [Network usage](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10222): By correlating a recent change in our spend profile to a network architecture change, we were able to highlight the need for additional changes. We ultimately implemented a change to directly download runner artifacts from GCS instead of having the traffic be proxied. This significantly reduced our overall networking cost.\n\n## 4. Measure business outcomes vs spend\n\nWhen you get to a point for a particular area where you feel like you have done all the basic optimizations and aren't sure where else you could reduce cost without seriously impacting your employees or customers, you have reached stage 4. This stage is all about analyzing the value of more complex changes that could reduce spend at the expense of something else, as well as considering the value and cost impact of major feature or architectural changes in the future.\n\n### GitLab example\nOur best example of this was our recent rollout of [advanced global search](https://docs.gitlab.com/ee/user/search/advanced_search.html) to all paid users on GitLab.com. In the first iterations of testing for this feature our costs were exceptionally high. Through a lot of hard work by the team responsible for the feature, they were able to significantly bring down the costs while improving functionality. Through those efforts, GitLab was able to bring this great feature to the platform in a way that also made sense from a business perspective.\n\n## 5. Predict future spend and problem areas\nOnce your company has matured the practices above, you can start to become proactive about observing cost. You can also begin to detect and alert when spend is outside expected thresholds. Once you get to this point, infrastructure optimization should become a boring topic, and when you no longer have any cases of huge unexpected cost increases that were not due to unexpected increases in customer demand, you know you are doing a great job.\n\n### GitLab example\n\nWe’re still working on this stage ourselves. While we’ve had some success in detecting unexpected spend, and even tying it to anomalous behavior in our platform, we recognize we have much more to do here. We are still working to get most of our usage to Stages 3-4, while spending parallel effort to reach Stage 5 for some more mature workloads.\n\n## Current state and next steps\nToday at GitLab, depending on the workload, we are anywhere between stages 1-4. The bulk of the work is going into getting everything to at least stage 2, and from there we can work on getting everything to stages 3-4. Current efforts include applying our newly created [infrastructure standards](/handbook/infrastructure-standards/) across all of our infrastructure, bringing in relevant product usage data from our various services, and giving PMs the tools they need to better manage the cost of their services through a single source of truth of base level cost metrics.\n\n## Workflow and planning\nCost optimization is a difficult topic to tackle effectively as it involves many different stakeholders across the business who all have their own priorities. The way we are taking this problem on at GitLab is we have an [issue board](https://gitlab.com/groups/gitlab-com/-/boards/1502173?label_name[]=infrafin) where we plan and track progress on issues related to infrastructure spend. 
For all the major initiatives, we assign a priority based on four factors:\n\n1. Cost savings\n2. Customer impact\n3. Future potential cost impact\n4. Effort required\n\nThese factors are discussed and reviewed by our analyst, our SaaS offering product manager, and the relevant subject matter expert for the area. Once the priority is agreed upon, the product manager works with various product teams to get these scheduled into milestones or backlog queues for the teams that need to implement the changes. Progress is tracked on the issue board, and reviewed for priority to ensure the solution moves forward at an appropriate velocity.\n\n## More to read\n\nAll of this info and more can be found in our [Cost Management Handbook](/handbook/engineering/infrastructure/cost-management/). We continue to improve this page to provide our own employees with the resources they need to understand this topic better, as well as to give external viewers some idea of how they could think about infrastructure optimization in their own company.\n\nYou might also enjoy:\n* [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n* [How we migrated application servers from Unicorn to Puma](/blog/migrating-to-puma-on-gitlab/)\n* [How we upgraded PostgreSQL at GitLab.com](/blog/gitlab-pg-upgrade/)\n\nCover image by [Fabian Blank](https://unsplash.com/@blankerwahnsinn?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[728,915,1347,873],{"slug":4811,"featured":6,"template":678},"how-we-optimized-our-infrastructure-spend-at-gitlab","content:en-us:blog:how-we-optimized-our-infrastructure-spend-at-gitlab.yml","How We Optimized Our Infrastructure Spend At Gitlab","en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab.yml","en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab",{"_path":4817,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4818,"content":4824,"config":4830,"_id":4832,"_type":16,"title":4833,"_source":17,"_file":4834,"_stem":4835,"_extension":20},"/en-us/blog/checkmarx-integration",{"title":4819,"description":4820,"ogTitle":4819,"ogDescription":4820,"noIndex":6,"ogImage":4821,"ogUrl":4822,"ogSiteName":692,"ogType":693,"canonicalUrls":4822,"schema":4823},"Get the most out of the Checkmarx integration with GitLab","Make it easier for developers to find bugs and for dev and sec to get along. Here’s what you need to know about the GitLab/Checkmarx integration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681659/Blog/Hero%20Images/checkmarx.jpg","https://about.gitlab.com/blog/checkmarx-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get the most out of the Checkmarx integration with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-10-12\",\n      }",{"title":4819,"description":4820,"authors":4825,"heroImage":4821,"date":4826,"body":4827,"category":14,"tags":4828},[2150],"2020-10-12","\n\nIn our 2020 Global DevSecOps survey, 65% of respondents said their organizations were shifting security left. 
Shifting left is the holy grail of [DevOps](/topics/devops/), certainly, but there’s reason to believe most organizations actually aren’t quite *left* enough: Less than 20% of respondents said developers were able to access either SAST or DAST scans from within their pipelines or IDEs.\n\nIt’s perhaps not surprising that, in the [same survey](/developer-survey/), security pros complained rather bitterly about developers finding too few bugs too late in the process.\n\nOne solution to this problem is to [integrate application security testing](/topics/devsecops/) earlier and actually within a development tool. During Commit, [our 2020 virtual user conference](/events/commit/), [James Brotsos](https://www.linkedin.com/in/jbrotsos/), a senior solutions engineer with [Checkmarx](https://www.checkmarx.com), walked attendees through the process of integrating his company’s security testing platform with GitLab.\n\n“(Integrating app security testing) really does free up time to focus on things that actually matter to developers, which is writing code,” James said during his presentation. “With this methodology, we are shifting far left into the software development life cycle. We still are providing governance and gating capabilities, constantly scanning the latest code and this replaces the need to scan it in the IDE.”\n\n## Getting started\n\nTo get the most out of an integrated security testing platform, James said companies should start by making a series of decisions:\n\n- What do you want to scan? Commits or merge requests?\n- When do you want to scan? Nightly, weekly, more often?\n- How do you want the data? Via the Checkmarx platform, emails, Slack messages, inside GitLab or through auto ticket creation?\n\n“We have an interactive security testing platform which is an agent that runs on a test server,” James explained. “It’s running your code, it monitors traffic driven from functional tests and it could run security types of queries on top of that. We provide… all these types of vulnerabilities and we train you how to fix them.”\n\nAt the heart of the GitLab integration with Checkmarx is CxFlow, a Spring Boot application that initiates scans and retrieves results, James said. Scanning is initiated by integrating with [GitLab’s CI/CD](/topics/ci-cd/), or through a merge request or pushed code, triggering an already existing pipeline. That pipeline needs just a single edit to include the stage to execute a security scan.\n\nThe integration is completely customizable and developers can get what they need when they need it. CxFlow drives a result feedback loop so no manual intervention is required, and developers can filter the types of defects created based on any filtering criteria. “The results are easy to consume in a way that developers want to consume them… and the results are actionable,” James said.\n\nWhen it comes to defect tracking, CxFlow solves the problem of having the same vulnerability type in the same file by creating just a single issue, which automatically closes once it's been dealt with. And developers can choose how they receive feedback: through GitLab’s security dashboard or issues, or through Jira, email, ServiceNow and Rally.\n\n## The nuts and bolts\n\nTo tie security scanning into GitLab, start by setting up the global variables that will allow access to the Checkmarx server. After that the CI/CD pipeline can kick off. Separate the Checkmarx stages from the GitLab CI file – you don’t want to “pollute” any existing YAML file set up by your DevOps team. 
Just include another YAML file with this stage or extension, and that will allow you to have the Checkmarx-specific information which will kick off the CxFlow CLI.\n\nOnce CxFlow starts to run inside that container, it will initiate a scan inside Checkmarx. The results will be sent back to CxFlow. “Depending on how you want to consume those results, we can update the security dashboard, we can update the issues, we can update the merge request, or we can update all three of them at the same time,” James said.\n\nCxFlow can also create issues automatically that can then be triaged to an epic or assigned to a specific user. “This way you can treat all security vulnerabilities as you would any other defect or any other kind of issue,” James said.\n\n“This is a pretty effortless option for the development teams to scan projects quickly,” James said. “There is no overhead when configuring and managing these builds. You can quickly automate the scan of multiple repositories and there's no overhead in configuring and managing all of these different repos that you might have.”\n\n## A deeper dive\n\nA more detailed look at this project can be found [on the Checkmarx website](https://checkmarx.atlassian.net/wiki/spaces/SD/pages/1929937052/GitLab+Integration), or you can watch the entire Commit presentation:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/W1Wk3PN0o1M\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [JJ Ying](https://unsplash.com/@jjying) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1307,1084,4829],"developer survey",{"slug":4831,"featured":6,"template":678},"checkmarx-integration","content:en-us:blog:checkmarx-integration.yml","Checkmarx Integration","en-us/blog/checkmarx-integration.yml","en-us/blog/checkmarx-integration",{"_path":4837,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4838,"content":4844,"config":4850,"_id":4852,"_type":16,"title":4853,"_source":17,"_file":4854,"_stem":4855,"_extension":20},"/en-us/blog/incident-management-with-aws-cloudwatch",{"title":4839,"description":4840,"ogTitle":4839,"ogDescription":4840,"noIndex":6,"ogImage":4841,"ogUrl":4842,"ogSiteName":692,"ogType":693,"canonicalUrls":4842,"schema":4843},"How to use GitLab's Incident Management with AWS CloudWatch","It's a straightforward process to set up GitLab Incident Management to work with AWS CloudWatch alarms. Here's what you need to know to get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664070/Blog/Hero%20Images/cloudwatch-gitlab-incident-management-bg.jpg","https://about.gitlab.com/blog/incident-management-with-aws-cloudwatch","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab's Incident Management with AWS CloudWatch\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean Arnold\"}],\n        \"datePublished\": \"2020-10-08\",\n      }",{"title":4839,"description":4840,"authors":4845,"heroImage":4841,"date":4847,"body":4848,"category":14,"tags":4849},[4846],"Sean Arnold","2020-10-08","\n\nAWS CloudWatch is a popular tool for users of Amazon Web Services to monitor and set alarms on their resources, including EC2 instances, RDS databases and many more.\n\nWhen alarms fire, it is important that your toolchain can quickly and effectively notify you and collate the relevant data. 
This enables your team to start determining the root cause and take action toward remediation.\n\nGitLab Incident Management now makes it easier than ever to do this. GitLab can take AWS CloudWatch alerts (aka [alarms](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html)), or alerts from any other monitoring and alerting tool you have, and seamlessly integrate them into your [DevOps lifecycle](/topics/devops/).\n\n## Getting your alerts from AWS CloudWatch to GitLab\n\nNote: For this post, we will assume you are familiar with setting up CloudWatch metrics and alarms within AWS. For more information on AWS CloudWatch, consult the [AWS documentation](https://aws.amazon.com/cloudwatch/).\n\n### Enable the endpoint\n\nWith our generic alert endpoint, GitLab can ingest alerts via REST from any alerting service you have. An alert can be as simple as providing a title or as complex as you need. We provide some defined attributes that you can use to refine your GitLab Incident Management experience, such as the severity of the alert, the service that is alerting, and `gitlab_environment_name` so that you can get an [insight into your alerts for an associated environment and deployment](https://docs.gitlab.com/ee/ci/environments/#environment-incident-management) for users on our Gold and Ultimate plans.\n\nThe first step is to enable your project's alert endpoint. Follow the instructions in the [docs](https://docs.gitlab.com/ee/operations/incident_management/integrations.html#setting-up-generic-alerts) to do this.\n\nNext, we need to ensure the data sent to GitLab is in the expected payload format.\n\n### Transform the payload\n\nOne approach to sending CloudWatch alarm data to GitLab is to use AWS Lambda to call the GitLab REST endpoint. We can set this up by publishing the CloudWatch alarm to an [SNS](https://aws.amazon.com/sns/) endpoint, which is then consumed by AWS Lambda to mutate and forward the alert payload to GitLab.\n\n![AWS CloudWatch to GitLab Incident Management](https://about.gitlab.com/images/blogimages/cloudwatch-incident-management-flow.png)\n\nIf you want to get this up and running quickly, I’ve [provided an AWS SAM (Serverless Application Model) application](https://gitlab.com/gitlab-examples/ops/incident-setup/everyone/cloudwatch-sns-to-gitlab-alerts) which can set up the Lambda application with the environment variables ready for you to enter your GitLab endpoint URL.\n\nWe know that managing the integration between two tools can be painful. In the future, we want to make this step as easy as possible: the step of transforming your payload into GitLab Alert format will soon be replaced by [custom endpoints for alerts](https://gitlab.com/groups/gitlab-org/-/epics/4390).\n\nNext, you can [set up your SNS Notification Topic](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/US_SetupSNS.html), and subscribe to the [SNS Topic with your Lambda function](https://docs.aws.amazon.com/sns/latest/dg/sns-lambda-as-subscriber.html).\n\n### Receive your alerts\n\nWhen your CloudWatch alarm next triggers, Lambda should then fire the alert off to GitLab. 
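To make the transformation step concrete, here is a minimal sketch of what such a Lambda handler could look like. This is illustrative only – it is not the SAM application linked above – and the environment variable names (`GITLAB_ALERT_URL`, `GITLAB_ALERT_AUTH_KEY`) are hypothetical; the payload keys follow the defined attributes of the generic alert endpoint described earlier.

```python
# Illustrative sketch – not the SAM application linked above.
# Assumes GITLAB_ALERT_URL and GITLAB_ALERT_AUTH_KEY are set as
# Lambda environment variables (hypothetical names).
import json
import os
import urllib.request

def lambda_handler(event, context):
    # SNS delivers the CloudWatch alarm as a JSON string.
    alarm = json.loads(event["Records"][0]["Sns"]["Message"])

    # Map CloudWatch alarm fields onto GitLab's generic alert payload.
    payload = {
        "title": alarm.get("AlarmName", "CloudWatch alarm"),
        "description": alarm.get("NewStateReason", ""),
        "monitoring_tool": "AWS CloudWatch",
        "start_time": alarm.get("StateChangeTime"),
        "severity": "critical" if alarm.get("NewStateValue") == "ALARM" else "low",
    }

    req = urllib.request.Request(
        os.environ["GITLAB_ALERT_URL"],
        data=json.dumps(payload).encode("utf-8"),
        headers={
            "Content-Type": "application/json",
            "Authorization": "Bearer " + os.environ["GITLAB_ALERT_AUTH_KEY"],
        },
    )
    with urllib.request.urlopen(req) as resp:
        return {"statusCode": resp.status}
```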
You should then see your alert in the [Alert list](https://docs.gitlab.com/ee/operations/incident_management/alerts.html).\n\n![AWS CloudWatch to GitLab Incident Management alert list](https://about.gitlab.com/images/blogimages/cloudwatch-gitlab-incident-management-list.png)\n\nYou can click on an alert to [see more details](https://docs.gitlab.com/ee/operations/incident_management/alerts.html), assign the alert to a user, and change its status. If the alert is significant enough to raise an incident, you can do that by clicking “Create Incident.”\n\nCreating an incident will give you the power to assign team members to it and collaborate on it just like you would a regular GitLab issue. The incident will have the payload of the alert included in the [Alert Details tab](https://docs.gitlab.com/ee/operations/incident_management/incidents.html#alert-details).\n",[894,232,1286],{"slug":4851,"featured":6,"template":678},"incident-management-with-aws-cloudwatch","content:en-us:blog:incident-management-with-aws-cloudwatch.yml","Incident Management With Aws Cloudwatch","en-us/blog/incident-management-with-aws-cloudwatch.yml","en-us/blog/incident-management-with-aws-cloudwatch",{"_path":4857,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4858,"content":4864,"config":4869,"_id":4871,"_type":16,"title":4872,"_source":17,"_file":4873,"_stem":4874,"_extension":20},"/en-us/blog/introducing-the-gitlab-kubernetes-agent",{"title":4859,"description":4860,"ogTitle":4859,"ogDescription":4860,"noIndex":6,"ogImage":4861,"ogUrl":4862,"ogSiteName":692,"ogType":693,"canonicalUrls":4862,"schema":4863},"Understand the new GitLab Agent for Kubernetes","Just released in 13.4, our brand new Kubernetes Agent provides a secure and K8s–friendly approach to integrating GitLab with your clusters.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/introducing-the-gitlab-kubernetes-agent","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand the new GitLab Agent for Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2020-09-22\",\n      }",{"title":4859,"description":4860,"authors":4865,"heroImage":4861,"date":4866,"body":4867,"category":14,"tags":4868},[2014],"2020-09-22","\n\nWe are happy to share the first iteration of the GitLab Agent for Kubernetes with our users and community. The Agent is the foundation for the next generation of the integration between GitLab and Kubernetes.\n\n## A bit of history of the GitLab Kubernetes Integrations\n\nGitLab's current Kubernetes integrations were introduced more than three years ago. Their primary goal was to allow a simple setup of clusters and provide a smooth deployment experience to our users. These integrations served us well in the past years, but at the same time their weaknesses were limiting for some important and crucial use cases. 
The biggest weaknesses we see with the current integration are:\n\n- the requirement to open up the cluster to the internet, especially to GitLab\n- the need for cluster admin rights to get the benefit of GitLab Managed Clusters\n- exclusive support for push-based deployments that might not suit some highly regulated industries\n\nA few months ago, the Configure Team at GitLab started going in a new direction to come up with an integration that could address these weaknesses and provide a cloud native tie-in between GitLab and Kubernetes. This new direction is built on the GitLab Agent for Kubernetes, which we released in [GitLab 13.4](/releases/2020/09/22/gitlab-13-4-released/).\n\n## Design goals\n\nWhen we sat down to address the above weaknesses, we came up with a few principles that we are seeking to follow.\n\nWe want to be good cloud native citizens, and work together with the community, instead of reinventing the wheel.\n\nWe primarily want to serve expert Kubernetes platform engineers. While the current GitLab Managed Clusters and cluster creation from within GitLab might serve many use cases, they are primarily aimed at simple cluster setup and are not flexible enough to be the basis for production clusters. We want to change this approach, and are focusing on the needs of expert Kubernetes engineers first. We think that coming up with sane defaults will provide the necessary simplicity for new Kubernetes users as well.\n\nWe want to offer a secure solution that allows cluster operators to restrict GitLab's rights in the cluster and does not require opening up the cluster to the Internet.\n\n## The Agent\n\nFollowing the above goals, we've started to develop the GitLab Agent for Kubernetes. The Agent provides a permanent communication channel between GitLab and the cluster. To follow industry best practices for [GitOps](/topics/gitops/), it is configured through code instead of a UI.\n\nThe current version of the Agent allows for pull-based deployments. Its deployment machinery is built on the [`gitops-engine`](https://github.com/argoproj/gitops-engine), a project initiated by ArgoCD and Flux to which GitLab engineers are actively contributing as well.\n\n### Setting up the GitLab Agent\n\nThe Agent needs to be set up first. This requires a few actions from the user:\n\n- create an Agent token for authentication with GitLab, and store it in your cluster as a secret\n- commit the necessary Agent configurations in one of your repositories\n- install the Agent into your cluster\n\n### Deployments with an Agent\n\nAs mentioned above, the Agent needs a configuration directory inside one of your repositories. This configuration describes the projects that the Agent syncs into your clusters. We call such a synced project a __manifest project__. The manifest project should contain Kubernetes manifest files. The __manifest project__ might be either inside or separate from your application code.\n\nWe've set up a simple example that shows a __manifest project__ and an __application project__. In this example [GitLab CI/CD](/topics/ci-cd/) in the __application project__ is used to create a container image and update the __manifest project__. Then the Agent picks up the changes from the __manifest project__, and deploys the Kubernetes manifests stored there.\n\n### Limitations\n\nAs this is the initial release of the Agent, it has many known limitations. 
We don't support all the amazing features the previous GitLab Kubernetes integration does, such as [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), deploy boards, GitLab Managed Apps, etc. To start, in GitLab 13.4 we limited our focus to supporting pull-based deployments for Helm-based GitLab installations.\n\nFollowing the current release, we will be focusing on:\n\n- [shipping the GitLab Agent for Kubernetes as part of the Official Linux Package](https://gitlab.com/groups/gitlab-org/-/epics/3834)\n- [supporting the deployment of private repositories](https://gitlab.com/gitlab-org/gitlab/-/issues/220912)\n\n## Further plans for GitLab Kubernetes Integrations\n\nThe Agent opens up many new opportunities for GitLab's Kubernetes integrations. Having an active component allows us to provide all the GitLab functionalities in locked-down clusters as well. We're currently looking into the following areas to support with the Agent:\n\n- integrate cluster-side dynamic container scanning with GitLab\n- use GitLab as an authentication and authorization provider for Kubernetes clusters\n- offer linters and checks for Kubernetes best practices on deployed resources\n- proxy cluster services easily through GitLab\n\nYou can see all our plans in the [Agent epic](https://gitlab.com/groups/gitlab-org/-/epics/3329), where we invite you to give us feedback about this direction.\n\nYou can view a demo of how to install and use the GitLab Agent below:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/505413162\" width=\"640\" height=\"480\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n",[1002,2331,873,727],{"slug":4870,"featured":6,"template":678},"introducing-the-gitlab-kubernetes-agent","content:en-us:blog:introducing-the-gitlab-kubernetes-agent.yml","Introducing The Gitlab Kubernetes Agent","en-us/blog/introducing-the-gitlab-kubernetes-agent.yml","en-us/blog/introducing-the-gitlab-kubernetes-agent",{"_path":4876,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4877,"content":4883,"config":4889,"_id":4891,"_type":16,"title":4892,"_source":17,"_file":4893,"_stem":4894,"_extension":20},"/en-us/blog/year-of-kubernetes",{"title":4878,"description":4879,"ogTitle":4878,"ogDescription":4879,"noIndex":6,"ogImage":4880,"ogUrl":4881,"ogSiteName":692,"ogType":693,"canonicalUrls":4881,"schema":4882},"What we learned after a year of GitLab.com on Kubernetes","It's been one year since we moved GitLab.com to Kubernetes. We unpack the challenges and learnings from this major migration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681569/Blog/Hero%20Images/nico-e-AAbjUJsgjvE-unsplash.jpg","https://about.gitlab.com/blog/year-of-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What we learned after a year of GitLab.com on Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jarvis\"}],\n        \"datePublished\": \"2020-09-16\",\n      }",{"title":4878,"description":4879,"authors":4884,"heroImage":4880,"date":4886,"body":4887,"category":14,"tags":4888},[4885],"John Jarvis","2020-09-16","\n\nFor about a year now, the infrastructure department has been working on migrating all services that run on GitLab.com to Kubernetes. 
The effort has not been without challenges, not only with moving services to Kubernetes but also with managing a hybrid deployment during the transition. We have learned a number of lessons along the way that we will explore in this post.\n\nSince the very beginning of GitLab.com, servers for the website have run in the cloud on virtual machines. These VMs are managed by Chef and installed using our [official Linux package](/install/#ubuntu).\nWhen an application update is required, [our deployment strategy](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/deploy/gitlab-com-deployer.md) is to simply upgrade fleets of servers in a coordinated rolling fashion using a CI pipeline.\nThis method, while slow and a bit [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions), ensures that GitLab.com is using the same installation methods and configuration as our self-managed customers who use Linux packages.\nWe use this method because it is especially important that any pain or joy felt by the community when installing or configuring self-managed GitLab is also felt by GitLab.com.\nThis approach worked well for us for a time, but as GitLab.com grew to host over 10 million projects, we realized it would no longer serve our needs for scaling and deployments.\n\n## Enter Kubernetes and cloud native GitLab\n\nWe created the [GitLab Charts](https://gitlab.com/gitlab-org/charts) project in 2017 to prepare GitLab for deployments in the cloud and enable self-managed users to install GitLab into a Kubernetes cluster. We knew then that running GitLab.com on Kubernetes would benefit the SaaS platform for scaling, deployments, and efficient use of compute resources. At the time, though, there were still many application features that depended on NFS mounts, which delayed our migration off of VMs.\n\nThe push for cloud native and Kubernetes gave engineering an opportunity to plan a gradual transition that removed some of the network storage dependencies on the application while continuing to develop new features. Since we started planning the migration in the summer of 2019, most of these limitations have been resolved and the journey to running all of GitLab.com on Kubernetes is now well underway!\n\n## Running GitLab.com on Kubernetes\n\nFor GitLab.com we use a single regional GKE cluster that services all application traffic. To minimize the complexity of the (already complex) migration we focus on services that don't depend on local storage or NFS. 
While GitLab.com runs from a mostly monolithic Rails codebase, we route traffic to different endpoints depending on workload characteristics, and those endpoints are isolated into their own node pools.\n\nOn the frontend these types are divided into web, API, Git SSH/HTTPS requests, and Registry.\nOn the backend we divide our queued jobs into different characteristics depending on [predefined resource boundaries](/blog/scaling-our-use-of-sidekiq/) that allow us to set Service-level Objective (SLO) targets for a range of different workloads.\n\nAll of these GitLab.com services are configured with the unmodified GitLab Helm chart, which configures them in sub-charts that can be selectively enabled as we gradually migrate services to the cluster.\nWe opted not to include some of our stateful services, such as Redis, Postgres, GitLab Pages, and Gitaly, but when the migration to Kubernetes is finished it will still drastically reduce the number of VMs that we currently manage with Chef.\n\n## Transparency and managing the Kubernetes configuration\n\nAll configuration is managed in GitLab itself in three configuration projects using Terraform and Helm.\nWhile we use GitLab to run GitLab wherever possible, we maintain a separate GitLab installation for operations.\nThis is done to ensure we do not depend on the availability of GitLab.com for deployments and upgrades of GitLab.com.\n\nEven though our pipelines that execute against the Kubernetes cluster run on this separate GitLab deployment, the code repositories are mirrored and publicly viewable at the following locations:\n\n* [k8s-workloads/gitlab-com](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com): GitLab.com configuration wrapper for the GitLab Helm chart.\n* [k8s-workloads/gitlab-helmfiles](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-helmfiles/): Contains the configuration for services that are not directly related to the GitLab application. This includes configurations for cluster logging and monitoring and integrations like PlantUML.\n* [gitlab-com-infrastructure](https://gitlab.com/gitlab-com/gitlab-com-infrastructure): Terraform configuration for the Kubernetes and legacy VM infrastructure. 
All the resources necessary to run the cluster are configured here, including the cluster, node pools, service accounts, and IP address reservations.\n\n[![hpa](https://about.gitlab.com/images/blogimages/a_year_of_k8s/hpa.png)](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/315#note_390180361)\nWhenever a change is proposed, a public [short summary](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/merge_requests/315#note_390180361) is displayed, with a link to a detailed diff that an SRE reviews before applying changes to the cluster.\n{: .note.text-center}\n\nFor SREs, we link to a detailed diff on our operations GitLab instance that has limited access.\nThis allows employees and the community, who do not have access to the operational project which is limited to SREs, to have visibility into proposed config changes.\nBy having a public GitLab instance for code, and a private instance for [CI pipelines](/solutions/continuous-integration/), we are able to keep a single workflow while at the same time ensuring we don't have a dependency on GitLab.com for configuration updates.\n\n## The lessons we learned along the way\n\nWe have learned a few things along the way, lessons that we are applying to future migrations and new deployments into Kubernetes.\n\n### Increased billing from cross-AZ traffic\n\n![git egress](https://about.gitlab.com/images/blogimages/a_year_of_k8s/git_egress.png)\nDaily egress in bytes from the Git storage fleet on GitLab.com.\n{: .note.text-center}\n\nGoogle divides its network into regions, and regions are divided into availability zones (AZs).\nBecause of the large amount of bandwidth required for Git hosting, it is important we are cognizant of network egress. For internal network traffic, egress is only free-of-charge if it remains in a single AZ.\nAt the time of writing this blog post, we deliver approximately 100TB on a typical work day for just Git repositories.\nIn the legacy VM topology, services that were previously colocated on the same VMs are now running in Kubernetes pods.\nThis means some network traffic that was previously local to a VM can now potentially traverse availability zones.\n\nRegional GKE clusters provide the convenience of spanning multiple availability zones for redundancy.\nWe are considering [splitting the regional GKE cluster into single zonal clusters](https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/1175) for services that use a lot of bandwidth to avoid network egress charges while maintaining redundancy at the cluster level.\n\n### Resource limits, requests, and scaling\n\n![replicas](https://about.gitlab.com/images/blogimages/a_year_of_k8s/replicas.png)\nNumber of replicas servicing production traffic on registry.gitlab.com; Registry traffic reaches its peak at ~15:00 UTC.\n{: .note.text-center}\n\nOur migration story began in August 2019 when we migrated the GitLab Container Registry to Kubernetes, the first service to move.\nThough this was a critical and high-traffic service, it was a good choice for the first migration because it is a stateless application with only a few external dependencies.\nThe first challenge we experienced was the large number of evicted pods, due to memory constraints on our nodes.\nThis required multiple changes to requests and limits. 
We found that with an application that increases its memory utilization over time, low requests (which reserve memory for each pod) and a generous hard limit on utilization were a recipe for node saturation and a high rate of evictions.\nTo adjust for this [we eventually decided to use higher requests and a lower limit](https://gitlab.com/gitlab-com/gl-infra/delivery/-/issues/998#note_388983696), which took pressure off the nodes and allowed pods to be recycled without putting too much pressure on the node.\nAfter experiencing this once, we now start our migrations with generous requests and limits that are close to the same value, and adjust down as needed.\n\n### Metrics and logging\n\n![registry-general](https://about.gitlab.com/images/blogimages/a_year_of_k8s/registry-general.png)\nThe Infrastructure department focuses on latency, error rates, and saturation, which have [Service-level objectives (SLOs)](https://en.wikipedia.org/wiki/Service-level_objective) that tie into our [overall system availability](https://dashboards.gitlab.net/d/general-slas/general-slas?orgId=1).\n{: .note.text-center}\n\nOver the past year, one of the major changes in the infrastructure department was improvements to how we monitor and manage SLOs.\nSLOs allowed us to set targets on individual services which were monitored closely during the migration.\nYet even with this improved observability, we can't always see problems right away with our metric reporting and alerting.\nFor example, focusing on latency and error rates may not adequately cover all uses of the service that is being migrated.\nWe discovered this problem very early with some of the workloads that were moved into the cluster. This challenge was particularly acute when we had to validate features that do not receive many requests but have very specific configuration dependencies.\nOne of the key migration lessons was to evaluate more than just monitoring metrics: we also needed to look at logs and the long tail of errors in our monitoring.\nNow for every migration we include a detailed list of log queries and plan clear rollback procedures that can be handed off from one shift to the next in case of issues.\n\nServing the same requests on legacy VM infrastructure and Kubernetes simultaneously presented a unique challenge.\nUnlike a lift-and-shift migration, running on legacy VMs and Kubernetes at the same time requires that our observability is compatible with both and combines metrics into one view.\nMost importantly, we are using the same dashboards and log queries to ensure the observability is consistent during the transition period.\n\n### Shifting traffic to the new cluster\n\nFor GitLab.com we maintain a segmentation of our fleet named the [canary stage](/handbook/engineering/#canary-testing).\nThis canary fleet services our internal projects, [or can be enabled by users](https://next.gitlab.com), and is deployed to first for infrastructure and application changes.\nThe first service we migrated started by taking limited traffic internally, and we are continuing to use this method to ensure we are meeting our SLOs before committing all traffic to the cluster.\nWhat this means for the migration is that requests to internal projects are first routed to Kubernetes, and then we slowly move other traffic to the cluster using HAProxy backend weighting.\nWe learned in the process of moving from VMs to Kubernetes that it was extremely beneficial for us to have an easy way to move traffic between the old and new infrastructure, and to keep legacy infrastructure 
available for rollback in the first few days after the migration.\n\n### Reserved pod capacity and utilization\n\nOne problem we identified early was that, while our pod start times for the Registry service were very short, our start times for Sidekiq took as long as [two minutes](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/1775).\nThe long Sidekiq start times posed a challenge when we started moving workloads to Kubernetes for workers that need to process jobs quickly and scale fast.\nThe lesson here was that while the Horizontal Pod Autoscaler (HPA) works well in Kubernetes for adapting to increased traffic, it is also important to evaluate workload characteristics and set reserved pod capacity, especially for uneven demand.\nIn our case, we saw a sudden spike in jobs that caused a large scaling event, which saturated CPU before we could scale the node pool.\nWhile it is tempting to squeeze as much as possible out of the cluster, after experiencing some initial performance problems we now start with a generous pod budget and scale down later, while keeping a close eye on SLOs.\nThe pod start times for the Sidekiq service have improved significantly and now average about 40 seconds. [Improving the pod start times](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/1775) benefited GitLab.com as well as all the self-managed customers using the official GitLab Helm chart.\n\nAfter transitioning each service, we enjoyed many benefits of using Kubernetes in production, including much faster and safer deploys of the application, scaling, and more efficient resource allocation.\nThe migration benefits extend beyond GitLab.com. With each improvement of the official Helm chart, we provide additional benefits to our self-managed customers.\n\nWe hope you enjoyed reading about our Kubernetes migration journey. 
As we continue to migrate more services to the cluster, you can read more at the following links:\n\n* [Why are we migrating to Kubernetes?](/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/)\n* [GitLab.com on Kubernetes](/handbook/engineering/infrastructure/production/architecture/#gitlab-com-on-kubernetes)\n* [Tracking epic for the GitLab.com Kubernetes Migration](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/112)\n\nCover image by [Nico E.](https://unsplash.com/@xnico) on [Unsplash](https://www.unsplash.com/)\n{: .note}\n\n## Read more on Kubernetes:\n\n- [How to install and use the GitLab Kubernetes Operator](/blog/gko-on-ocp/)\n\n- [Threat modeling the Kubernetes Agent: from MVC to continuous improvement](/blog/threat-modeling-kubernetes-agent/)\n\n- [How to deploy the GitLab Agent for Kubernetes with limited permissions](/blog/setting-up-the-k-agent/)\n\n- [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n\n- [Understand Kubernetes terminology from namespaces to pods](/blog/kubernetes-terminology/)\n",[915,1286],{"slug":4890,"featured":6,"template":678},"year-of-kubernetes","content:en-us:blog:year-of-kubernetes.yml","Year Of Kubernetes","en-us/blog/year-of-kubernetes.yml","en-us/blog/year-of-kubernetes",{"_path":4896,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4897,"content":4903,"config":4909,"_id":4911,"_type":16,"title":4912,"_source":17,"_file":4913,"_stem":4914,"_extension":20},"/en-us/blog/gitlab-pg-upgrade",{"title":4898,"description":4899,"ogTitle":4898,"ogDescription":4899,"noIndex":6,"ogImage":4900,"ogUrl":4901,"ogSiteName":692,"ogType":693,"canonicalUrls":4901,"schema":4902},"How we upgraded PostgreSQL at GitLab.com","We explain the precise maintenance process to execute a major version upgrade of PostgreSQL.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668002/Blog/Hero%20Images/pg-gear.jpg","https://about.gitlab.com/blog/gitlab-pg-upgrade","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we upgraded PostgreSQL at GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jose Finotto\"}],\n        \"datePublished\": \"2020-09-11\",\n      }",{"title":4898,"description":4899,"authors":4904,"heroImage":4900,"date":4906,"body":4907,"category":14,"tags":4908},[4905],"Jose Finotto","2020-09-11","\n\nWe teamed up with [OnGres](https://ongres.com/) to [perform a major version upgrade of GitLab.com's main Postgres cluster from version 9.6 to 11](https://status.gitlab.com/pages/maintenance/5b36dc6502d06804c08349f7/5ea322c1d1097004ba30d227) back in May 2020. We upgraded it during a maintenance window, and it all went according to plan. We unpack all that was involved – from planning, testing, and full process automation – to achieve a near-perfect execution of the PostgreSQL upgrade. The full operation was recorded and you can [watch it on GitLab Unfiltered](https://youtu.be/TKODwTtKWew).\n\nThe biggest challenge was to do a complete, fleet-wide major upgrade through an orchestrated [pg_upgrade](https://www.postgresql.org/docs/11/pgupgrade.html). 
We needed a rollback plan that was optimized for our [Recovery Time Objective (RTO)](https://en.wikipedia.org/wiki/Disaster_recovery), while keeping the 12-node cluster's 6TB of data consistent and serving 300,000 aggregated transactions per second from around six million users.\n\nThe best way to resolve an engineering challenge is to follow the blueprints and design docs. In the process of creating the blueprint, you define the problem you are attempting to solve, evaluate the most suitable solutions, and consider the pros and cons of each solution. Here is a [link](https://gitlab.com/gitlab-com/gl-infra/readiness/-/tree/master/library/database/postgres/Postgresql-upgrade/blueprint/) to the blueprint from the project.\n\nAfter the blueprint comes the design process. The implementation is detailed in the design process, where we explain the steps and requirements involved in executing the design. The design doc from the project is [linked here](https://gitlab.com/gitlab-com/gl-infra/readiness/-/tree/master/library/database/postgres/Postgresql-upgrade/design).\n\n## Why we upgraded PostgreSQL\n\nWe made a business decision in GitLab 13.0 to discontinue support for PostgreSQL 10.0. PostgreSQL version 9.6 is becoming EOL in November 2021, so we needed to take action.\n\nHere are some of the main differences in features [between PostgreSQL versions 9.6 and 11](https://why-upgrade.depesz.com/show?from=9.6.18&to=11.7&keywords=):\n\n * Native table partitioning, supporting LIST, RANGE, and HASH.\n * Transaction support in stored procedures.\n * [Just-in-time (JIT) compilation](https://www.postgresql.org/about/news/1894/) for accelerating the execution of expressions in queries.\n * Query parallelism improvements and parallelized data definition capabilities.\n * The new PostgreSQL version comes with the \"[Logical Replication - A publish/subscribe framework for distributing data](https://www.postgresql.org/about/news/1786/)\" that was introduced in version 10. This feature enables smoother future upgrades and simplifies other relevant processes.\n * Quorum-based commit, ensuring our transactions are committed on the specified nodes of the cluster.\n * Improved performance for queries over partitioned tables\n\n## The environment and architecture\n\nThe infrastructure capacity of the PostgreSQL cluster consisted of 12 n1-highmem-96 GCP instances for OLTP and asynchronous pipeline purposes, each with 96 CPU cores and 614GB of RAM – plus two BI nodes with different specs. The cluster HA is managed and configured through [Patroni](https://github.com/zalando/patroni), which keeps a consistent leader election through a Consul cluster and all its replicas working with asynchronous streaming replication using replication slots and WAL shipping against a GCS storage bucket.\nOur setup currently uses the Patroni HA solution, which constantly gathers critical information about the cluster, such as leader election status and node availability. 
It is implemented using key features from Consul, such as its DNS service, which in turn updates the PgBouncer endpoints, keeping separate architectures for read-write and read-only traffic.\n\n![GitLab.com architecture](https://about.gitlab.com/images/blogimages/pg-up-arch.png)\n[GitLab.com architecture](/handbook/engineering/infrastructure/production/architecture/#database-architecture)\n{: .note.text-center}\n\nFor HA purposes, two of the replicas are kept out of the read-only server pool that is used by the API and served by Consul DNS. After several enhancements to GitLab's architecture, we were able to downscale the fleet to seven nodes.\n\nFurthermore, the entire cluster handles a weekly average of approximately 181,000 transactions per second. As the image below indicates, the traffic increases on Monday and maintains the throughput during the week right up to Friday/Saturday. The traffic data was critical to set up a proper maintenance window so we could impact the fewest users.\n\n![GitLab.com Connection Numbers](https://about.gitlab.com/images/blogimages/pg-up-prom1.png)\nNumber of connections at GitLab.com\n{: .note.text-center}\n\nThe fleet is reaching 250,000 transactions per second in the busiest hours of the day.\n\n![GitLab.com Commits](https://about.gitlab.com/images/blogimages/pg-up-prom2.png)\nThe number of commits at GitLab.com.\n{: .note.text-center}\n\nIt is also handling spikes of 300,000 transactions per second. GitLab.com is reaching 60,000 connections per second.\n\n## Our upgrade requirements\n\nWe established a number of requirements before proceeding with the upgrade at production.\n\n * There should be no regressions on PostgreSQL 11. We developed a custom benchmark to perform extensive regression testing. The goal was to identify potential query performance degradation in PostgreSQL 11.\n * The upgrade should be done across the whole fleet within the maintenance window.\n * Use pg_upgrade, which relies on physical, not logical, replication.\n * Keep a 9.6 cluster sample: Not all the nodes should be upgraded; a few of them should be left on 9.6 as a rollback path.\n * The upgrade should be fully automated to reduce the chance of any human error.\n * A maintenance threshold of only 30 minutes for all the database upgrades.\n * The upgrade will be recorded and published.\n\n## The project\n\nTo accomplish a smooth execution in production, the project had the following phases:\n\n### Phase one: Develop automation in an isolated environment\n\n* Develop the [ansible-playbook](https://gitlab.com/gitlab-com/gl-infra/db-migration/-/tree/master/pg-upgrade) and test it on a PostgreSQL environment (created using a backup from staging) for these tests.\n* We used a separate environment to have the freedom to stop, initiate or restore the backup at any time, to focus on the development, and be able to restore an environment shortly before the upgrade.\n* We used a backup from staging to bring the upgrade project into contact with a realistic environment, where we faced some challenges, such as migrating the monitoring-related procedures in our database.\n\n### Phase two: Integrate development with our configuration management in staging\n\n* Integrate with our configuration management in Chef, and execute a snapshot from the database disk that could be used in a restore scenario.\n* We told our customers that we would schedule a maintenance window with the goals of having the least impact possible on their work and executing a safe upgrade without any risk of 
data loss.\n* After iterating and testing the integration with our configuration management, we started to execute end-to-end tests in staging. Those tests were announced internally, so the other teams that share this environment would know that staging would be unavailable for a period of time.\n\n### Phase three: Test the upgrade end-to-end in staging\n\n * Pre-flight checks on the environment. We sometimes found problems with credentials or made tiny adjustments to improve the efficiency of our tests.\n * Stop all the applications and traffic to GitLab.com, add a maintenance mode in Cloudflare and HAProxy, and stop all the applications that accessed the database: Sidekiq, Workhorse, WEB-API, etc.\n * Upgrade three nodes from the six-node cluster. We had a similar strategy in production with a rollback scenario in mind.\n * Execute the ansible-playbook for the PostgreSQL upgrade, first on the leader database node, and afterwards on the secondary nodes.\n * Post-upgrade: We executed some automated tests in our ansible-playbook, checking that the replication and data were consistent.\n * Next, we started the applications to enable our QA team to execute several test suites. They executed local unit tests on the upgraded database. We investigated negative results.\n * Once we finished the tests, we stopped the applications again to restore the staging cluster to version 9.6: we shut down the nodes upgraded to version 11 and started the old cluster, where Patroni would promote one of the nodes; we then started the applications so the cluster could receive traffic again. We restored the Chef configuration for the 9.6 cluster and rebuilt those databases to have six nodes ready for the next test.\n\nWe executed seven tests in staging in total, iterating to perfect the team's execution.\n\n### Phase four: Upgrade in production\n\nIn production, the steps were very similar to staging, and our plan was to have eight nodes migrated and four left behind as a backup:\n\n * Execute the pre-checks for the project.\n * Announce the start of the maintenance.\n * Execute the ansible-playbook to stop the traffic and application.\n * Execute the ansible-playbook to carry out the PostgreSQL upgrade.\n * Start the validation tests and restore the traffic. We performed the minimum number of tests required, so we could fit everything in the narrow maintenance window.\n\nThe rollback plan would only be called in case of any problems with the database consistency, or errors in the QA test. The steps included:\n\n * Stop the cluster with PostgreSQL 11.\n * Restore the configuration in Chef to PostgreSQL 9.6.\n * Initialize the cluster with the four nodes on version 9.6. With these four nodes, we could restore the activity for GitLab.com when traffic was quieter.\n * Start receiving traffic – with this approach we could minimize downtime.\n * Recreate the other nodes using the disk snapshot images that were taken during the maintenance, before the upgrade.\n\nAll the steps of the upgrade are detailed in the template used to execute the project.\n\n## How pg_upgrade works\n\nThe [pg_upgrade](https://www.postgresql.org/docs/11/pgupgrade.html) process allows us to upgrade PostgreSQL data files to a later major version without using a dump/reload strategy, which would require more downtime.\n\nAs explained in the [official PostgreSQL documentation](https://www.postgresql.org/docs/11/pgupgrade.html), the pg_upgrade tool avoids performing the dump/restore method to upgrade the PostgreSQL version. 
There are some important details to review before proceeding with this tool. Major PostgreSQL releases add new features that often change the layout of the system tables, but the internal data storage format rarely changes. If a major release changes the data storage format, pg_upgrade cannot be used, so we had to verify which changes were included between the major versions.\n\nIt is important that any external modules are also binary-compatible, though this cannot be checked by pg_upgrade. For the GitLab upgrade, we uninstalled views/extensions such as [postgres_exporter](https://github.com/wrouesnel/postgres_exporter) before the upgrade, to recreate them after the upgrade (with slight modifications for compatibility reasons).\n\nBefore performing the upgrade, the new version binaries have to be installed. The new PostgreSQL and extension binaries were installed on the set of hosts that were to be upgraded.\n\nThere are some options when using pg_upgrade. We chose to use pg_upgrade's link mode on the Leader node because of our narrow, two-hour maintenance window. This method avoids copying the 6TB of data files by hard linking files through [inodes](https://en.wikipedia.org/wiki/Inode). The drawback is that the old data cluster cannot be rolled back to 9.6. We provided a rollback path via the replicas kept on 9.6 and GCP snapshots as a secondary choice.\nRebuilding the replicas from scratch was not an option either, so we used rsync's incremental features to upgrade them. pg_upgrade's documentation says: \"From a directory on the primary server that is above the old and new database cluster directories, run this on the primary for each standby server\".\n\nThe ansible-playbook implemented this step by having a task from the leader node to each replica, triggering the rsync command from the parent directory of both new and old datadirs.\n\n## Regression testing benchmarks\n\nAny migration or database upgrade requires a regression test before performing the final production upgrade. For the team, the database test was a key step in this process, executing performance tests based on the query load from production, captured in the table pg_stat_statements. These were executed on the same dataset – once for version 9.6 and again for version 11. The process was captured in the following public issues:\n\n * [Preparing the tool](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/7817)\n * [Creating the test environment](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/9177)\n * [Capacity planning](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/9094)\n * [Run the benchmark with JMeter tool](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/9545)\n\nFinally, based on OnGres' work on this benchmark, GitLab will be following up with a new benchmark test for the future:\n\n * [Capacity assessment for our main production DB cluster](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10258)\n * [Database capacity and saturation analysis](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10340)\n\n### The upgrade process: automate everything\n\nDuring the upgrade project, the upgrade teams had a strong commitment to Infrastructure as Code (IaC) and automation: All the processes had to be fully automated in order to keep any human error to a minimum during the maintenance window. 
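To give a flavor of the kind of step that automation drives, the sketch below shows a pre-flight check followed by a link-mode pg_upgrade run. It is a simplified illustration, not GitLab's actual playbook task; the binary and data directory paths are hypothetical, and the real automation also orchestrated Patroni, Consul, Chef, and snapshots around this call.

```python
# Illustrative sketch only – the real work was done by Ansible playbooks.
# Paths below are hypothetical; adjust to your own cluster layout.
import subprocess

OLD_BIN, NEW_BIN = "/usr/lib/postgresql/9.6/bin", "/usr/lib/postgresql/11/bin"
OLD_DATA, NEW_DATA = "/var/opt/postgresql/9.6/data", "/var/opt/postgresql/11/data"

def run_pg_upgrade(check_only: bool) -> None:
    """Run pg_upgrade in link mode; --check performs a dry run."""
    cmd = [
        f"{NEW_BIN}/pg_upgrade",
        "-b", OLD_BIN, "-B", NEW_BIN,
        "-d", OLD_DATA, "-D", NEW_DATA,
        "--link",  # hard-link data files instead of copying 6TB
    ]
    if check_only:
        cmd.append("--check")
    subprocess.run(cmd, check=True)

# Pre-flight validation first, then the real upgrade.
run_pg_upgrade(check_only=True)
run_pg_upgrade(check_only=False)
```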
All the steps for pg_upgrade execution are detailed at this [GitLab pg_upgrade template issue](https://gitlab.com/gitlab-com/gl-infra/db-migration/-/blob/master/.gitlab/issue_templates/pg_upgrade.md).\n\nThe GitLab.com environment is managed by Terraform and Chef. All the automation for the upgrade was scripted via Ansible 2.9 playbooks and roles, where we used two ansible-playbooks to automate the upgrade:\n\nOne [ansible-playbook](https://gitlab.com/gitlab-com/gl-infra/ansible-migrations/-/tree/master/maintenance-mode) controlled the traffic and the applications:\n\n * Put Cloudflare into maintenance mode so no traffic would be received.\n * Stop HAProxy\n * Stop the middleware that accesses the database:\n   * Sidekiq\n   * Workhorse\n   * WEB-API\n\nThe second [ansible-playbook](https://gitlab.com/gitlab-com/gl-infra/db-migration/-/tree/master/pg-upgrade) executed the upgrade process:\n\n * Orchestrate all the database and pool traffic\n * Control the Patroni cluster and Consul instances\n * Execute the upgrade on the primary and secondary nodes\n * Collect statistics after the upgrade\n * Synchronize the changes using Chef to keep integrity with our configuration management\n * Verify the integrity and status of the cluster\n * Execute a GCP snapshot\n * Run the rollback process, if needed\n\nThe playbook was run interactively task by task, providing the operator with the ability to skip or pause at any given execution point. Every step was reviewed by all the teams that participated in the tests and iterations in staging for the upgrade.\nThe staging environment allowed us to rehearse and find issues using the same procedure that we planned to use in production. After executing and iterating the automated process in staging, we reached a quasi-flawless upgrade of PostgreSQL 9.6 to version 11.\n\nTo complete the release, the GitLab QA team reported errors that happened on some of the tests. Find the reference for this work in [this issue note](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/106#note_332170837).\n\n### Pre-upgrade steps\n\nThe first part of the process was the \"pre-upgrade\" section, which deals with the instances reserved for rollback purposes. We did the corresponding analysis to ensure that the new cluster could start with eight out of 12 instances of the fleet without losing throughput, reserving four instances for a potential rollback scenario - where they could be brought back up as a 9.6 cluster via standard Patroni cluster synchronization.\n\nIn this phase it was also necessary to stop Postgres-dependent services, such as PgBouncer, the Chef client, and Patroni.\n\nBefore proceeding with the upgrade itself, Patroni had to be signaled to avoid any spurious leader election, take a consistent backup through GCP Snapshots (using the corresponding [low-level backup API](https://www.cybertec-postgresql.com/en/exclusive-backup-deprecated-what-now/)) and apply the new settings via a Chef run.\n\n### The upgrade phase\n\nFirst, we stopped all the nodes.\n\nWe executed these checks:\n\n* pg_upgrade's version check\n* Verification that all the nodes were synchronized and not receiving any traffic.\n\nOnce the primary node data was upgraded, an rsync process was triggered for syncing the data with the replicas. 
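For illustration, the rsync step triggered by the playbook could look roughly like the sketch below. The hostnames and paths are hypothetical; the flags follow the incremental, hard-link-aware invocation recommended by the pg_upgrade documentation quoted earlier.

```python
# Illustrative sketch of the standby-sync step – the real task ran from
# our Ansible playbook. Hostnames and paths are hypothetical.
import subprocess

REPLICAS = ["replica-01.example.com", "replica-02.example.com"]

# Per the pg_upgrade docs, rsync runs from the directory above the old
# and new cluster directories, once per standby. --hard-links and
# --size-only make this an incremental, link-aware transfer.
for replica in REPLICAS:
    subprocess.run(
        [
            "rsync", "--archive", "--delete",
            "--hard-links", "--size-only", "--no-inc-recursive",
            "9.6", "11",                       # old and new cluster directories
            f"{replica}:/var/opt/postgresql",  # parent directory on the standby
        ],
        cwd="/var/opt/postgresql",  # run from the directory above both clusters
        check=True,
    )
```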
After the upgrade was done, the Patroni service was started up and all the replicas caught up easily with the new cluster configuration.\n\nThe binaries were installed by Chef, and the setup of the new cluster version was defined in the same MR that installed the extensions used by the GitLab.com database.\n\nThe last stage involved resuming the traffic, running a preliminary vacuum, and finally starting the PgBouncer and Chef Client services.\n\n### The migration day\n\nFinally, fully prepared to perform the production upgrade, the team met on that Sunday (night time for some, and early morning for others) at 08:45 UTC. The service would be down for a maximum of two hours. When the last announcements were sent, the engineering team was given permission to start the procedure.\n\nThe upgrade process began by stopping the traffic and related services, to prevent users from reaching the site.\n\nThe graph below shows the traffic and HTTP stats of the service before the upgrade, during the maintenance period (the \"gap\" in the graphs) and after, when the traffic was resumed.\n\n![GitLab.com traffic](https://about.gitlab.com/images/blogimages/pg-up-traf.png)\nGraphs of the traffic on GitLab.com before and after the upgrade maintenance.\n{: .note.text-center}\n\nThe total elapsed time to do the entire job was four hours, but it required only [two hours of downtime](https://status.gitlab.com/pages/maintenance/5b36dc6502d06804c08349f7/5ea322c1d1097004ba30d227).\n\n## It's on video\n\nWe recorded the full PostgreSQL upgrade and posted it to GitLab Unfiltered. Warm up the popcorn 🍿\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/TKODwTtKWew\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\nThanks to [Alvaro Hernandez](https://twitter.com/ahachete) and [Sergio Ostapowicz](https://twitter.com/Cepxio_OS) for co-authoring this blog post, as well as the [OnGres team](https://ongres.com) for their contributions and performing the upgrade with the GitLab team.\n\n## References\n\nThe issues used to coordinate this project are public:\n\n* [Upgrade Postgresql to version 11.7 on GitLab.com](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/106)\n* [Execute PostgreSQL upgrade on staging](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/9592)\n* [OnGres Inc on Twitter](https://twitter.com/ongresinc/status/1259441563614273537)\n* [Scheduled maintenance at GitLab.com](https://status.gitlab.com/pages/maintenance/5b36dc6502d06804c08349f7/5ea322c1d1097004ba30d227)\n\nCover image by [Tim Mossholder](https://unsplash.com/@timmossholder) on [Unsplash](https://unsplash.com/photos/GmvH5v9l3K4)\n{: .note}\n",[1286],{"slug":4910,"featured":6,"template":678},"gitlab-pg-upgrade","content:en-us:blog:gitlab-pg-upgrade.yml","Gitlab Pg Upgrade","en-us/blog/gitlab-pg-upgrade.yml","en-us/blog/gitlab-pg-upgrade",{"_path":4916,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4917,"content":4923,"config":4929,"_id":4931,"_type":16,"title":4932,"_source":17,"_file":4933,"_stem":4934,"_extension":20},"/en-us/blog/is-devops-for-designers",{"title":4918,"description":4919,"ogTitle":4918,"ogDescription":4919,"noIndex":6,"ogImage":4920,"ogUrl":4921,"ogSiteName":692,"ogType":693,"canonicalUrls":4921,"schema":4922},"Can DevOps be beneficial for design and UX?","Look at how DevOps phases can be integrated with design and UX, and why we've built the Figma plugin to help with 
this.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681548/Blog/Hero%20Images/GitLab-Figma-header.png","https://about.gitlab.com/blog/is-devops-for-designers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Can DevOps be beneficial for design and UX?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jordi Mon\"}],\n        \"datePublished\": \"2020-09-03\",\n      }",{"title":4918,"description":4919,"authors":4924,"heroImage":4920,"date":4926,"body":4927,"category":14,"tags":4928},[4925],"Jordi Mon","2020-09-03","\n\nAccording to two legends in the field of Design, [Don Norman](https://en.wikipedia.org/wiki/Don_Norman) and [Jakob Nielsen](https://en.wikipedia.org/wiki/Jakob_Nielsen_(usability_consultant)), a successful user experience occurs when the user can fulfill his or her needs. A product designed with high UX standards in mind should have enough functionality and self-explanatory visual information for all its users to complete their tasks without help.\n\nGitLab is a complete [DevOps platform](/topics/devops-platform/) – meaning, good UX within GitLab equals good developer experience (DX). Following Nielsen and Norman's argument, good DX is the ability to not only use the product’s UI to serve a dev's needs, but also to find good documentation in context, a versatile API, and general compatibility with their working environment. Considering this succinct description of the GitLab app, one could easily infer that all its users are either software developers or system administrators, right?\n\nHowever, this assertion isn’t entirely true. There's no doubt that developers and operators are still the protagonists of DevOps, but more and more people from other professions (including graphic design, research, marketing, and even psychology) are contributing to software building. At GitLab, we acknowledge that in our vision.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">We&#39;ve got a vision for our product and \u003Ca href=\"https://twitter.com/CLenneville?ref_src=twsrc%5Etfw\">@CLenneville\u003C/a>, VP of UX at GitLab, is sharing it live at \u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a>. \u003Ca href=\"https://t.co/if4xVWgxqT\">pic.twitter.com/if4xVWgxqT\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1298911891352367104?ref_src=twsrc%5Etfw\">August 27, 2020\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWe've already taken the first strides in this long-term vision for the GitLab product, and they aim to welcome designers to [DevOps](/topics/devops/). This post describes the first steps taken by GitLab's Product team to connect DevOps with design.\n\n## Is DevOps for designers?\nVisual design for applications is a completely different field from application development. For starters, designers work with designs, screens, user flows, prototypes, and so many other graphic assets, while developers only use [source code](/solutions/source-code-management/). 
Their workflows are also pretty different: While devs may find enough solace in push, pull, merge, and other operations useful to their daily routines with code, visual designers may require other sets of features that allow them to communicate, receive, and apply feedback on designs.\n\nIs a platform like GitLab a good place for designers to try DevOps? We think so. One of the foundations of DevOps is that [cross-functional teams deliver better products faster](http://cloudplatformonline.com/rs/248-TPC-286/images/DORA-State%20of%20DevOps.pdf). If that is the case, then why keep designers' collaboration platforms separate? Why make their workflows independent and disconnected, and why hand off deliverables when it should be all about constant iteration with handovers?\n\n[Figma](https://www.figma.com/) is a vector graphics editor and prototyping tool. Figma founder and CEO, [Dylan Field](https://twitter.com/zoink?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor), crunched some numbers and discovered that the designer-to-developer ratio had increased considerably among top players.\n\n![2.5x increase in designer to dev ratio](https://about.gitlab.com/images/blogimages/2020-09-03-is-devops-for-designers-figma-plugin/designer-to-dev-ratio.png){: .shadow.center}\nScreenshot from [TechCrunch](https://techcrunch.com/2017/05/31/here-are-some-reasons-behind-techs-design-shortage/)\n{: .note.text-center}\n\nWhen we mention \"top players,\" we're not talking about adaptable, flexible startups. Quite the contrary, in fact.\n\n> \"The companies willing to go on the record were mostly enterprise, so this sample doesn’t even include the consumer startups that famously focus on design, like Airbnb. Facebook staffers told us the social network has quadrupled its designer hiring target in the last two years alone - but Facebook wouldn’t officially comment.\" - Dylan Field wrote in [TechCrunch](https://techcrunch.com/2017/05/31/here-are-some-reasons-behind-techs-design-shortage/)\n\nAt GitLab, we've learned that frictionless feedback loops are the best way to validate our work. The feedback loop is fastest when designers can work hand in hand with the developers that create the source code that will later give life to their visuals.\n\n## Let DesignOps connect with DevOps: GitLab ❤️ Figma\n\nWe want designers to work in GitLab, which is why we created a new product category called [Design Management](/direction/plan/design_management/#introduction) that strives to make Designers welcome within GitLab and support their workflows. The first step in this direction is to change the dreaded handoff to a more iterative handover that will more accurately capture the feedback loops of the last part of the design workflow. How Design Management works at large will be the subject of another, in-depth blog post coming soon. You can catch a brief sneak peek on [YouTube](https://youtu.be/5oo0m3s5Gfk).\n\nWe developed a plugin to connect GitLab to Figma, to simplify the handover process. Now, you can upload one or multiple frames to any issue. 
From then on, designers, PMs, and engineers can discuss the designs within GitLab.\n\nNext, we explain why we picked Figma and then dive deeper into how to install and use the plugin.\n\n## Why did we choose to integrate with Figma?\n\nWatch the video below as [Jeremy Elder](/company/team/#jeldergl), senior product designer, FE/UX Foundations, Visual Design, explains why we chose Figma as the main tool for Product Designers in GitLab.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Qa9M74CfuXY?start=650&end=1040\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nOnce Product Design was comfortable using Figma to work on GitLab's design, the decision to build a plugin came naturally, considering how much we value [dogfooding](https://handbook.gitlab.com/handbook/values/#dogfooding): Why not make the transition from Figma to GitLab much easier? GitLab team members are heavy Figma users (our Figma community is [here](https://www.figma.com/@GitLab)) and you can see how we use it for product design below:\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe style=\"border: 1px solid rgba(0, 0, 0, 0.1);\" width=\"800\" height=\"450\" src=\"https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Fproto%2F73OcYdBfOaK2xlChC3tbNX%2FFigma-for-GitLab%3Fnode-id%3D2%253A61%26scaling%3Dscale-down&chrome=DOCUMENTATION\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\nGitLab's own community was requesting we build the Figma plugin.\n\n> “When will the plugin be published? Because our entire development team works on Linux\n> machines and can't run the Desktop application. When is this plugin going to be published so\n> it would be possible also for the users with Linux-based systems, which are more or less\n> forced to use the Web app, to use this plugin? I think, this would bring both, Figma and GitLab, generally a huge step forward.” – Community member [Emanuel Bennici](https://gitlab.com/l0nax) commented in the ([issue](https://gitlab.com/gitlab-org/gitlab-figma-plugin/-/issues/2#note_371842296))\n\n>“I also work on Linux and this would be a huge improvement for me and my company.” – Community member [Gabriel Jann](https://gitlab.com/JAIABRIEL) commented in the ([issue](https://gitlab.com/gitlab-org/gitlab-figma-plugin/-/issues/2#note_371844752))\n\n## How do I get started with the Figma plugin?\n\nFirst and foremost, [download the plugin](https://gitlab.com/gitlab-org/gitlab-figma-plugin) and get going with the first steps in the [User Guide](https://gitlab.com/gitlab-org/gitlab-figma-plugin/-/wikis/home). In the video below, [Christen Dybenko](/company/team/#cdybenko), Design Management PM, walks you through the installation and the first steps with the plugin in GitLab:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/KR2nuehGtrU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## What's next?\n\nTell us about your experience using the plugin by commenting on the [issue](https://gitlab.com/gitlab-org/gitlab-figma-plugin/-/issues/44).\n\nQuestions about the future of Design Management? Wondering about how it fits into our broader DevOps scheme? 
Check our [next steps](/direction/plan/design_management/#whats-next--why) and [long term strategy for Design Management](/direction/plan/design_management/#long-term-strategy).\n",[959,4300,1144],{"slug":4930,"featured":6,"template":678},"is-devops-for-designers","content:en-us:blog:is-devops-for-designers.yml","Is Devops For Designers","en-us/blog/is-devops-for-designers.yml","en-us/blog/is-devops-for-designers",{"_path":4936,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4937,"content":4943,"config":4949,"_id":4951,"_type":16,"title":4952,"_source":17,"_file":4953,"_stem":4954,"_extension":20},"/en-us/blog/using-bazel-to-speed-up-gitlab-ci-builds",{"title":4938,"description":4939,"ogTitle":4938,"ogDescription":4939,"noIndex":6,"ogImage":4940,"ogUrl":4941,"ogSiteName":692,"ogType":693,"canonicalUrls":4941,"schema":4942},"How to use Bazel with GitLab to speed up your builds","We explain why Bazel and GitLab CI are a great match to speed up your build times.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667132/Blog/Hero%20Images/build-container-image-runner-fargate-codebuild-cover.jpg","https://about.gitlab.com/blog/using-bazel-to-speed-up-gitlab-ci-builds","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Bazel with GitLab to speed up your builds\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2020-09-01\",\n      }",{"title":4938,"description":4939,"authors":4944,"heroImage":4940,"date":4946,"body":4947,"category":14,"tags":4948},[4945],"Jason Yavorska","2020-09-01","\n[Bazel](https://bazel.build/) is a useful tool that can be used with GitLab CI to push your build pipelines into overdrive.\n\nFor maximum correctness, [CI/CD](/topics/ci-cd/) systems will usually rebuild all of the artifacts from scratch on every run. This method is considered safer since artifacts from one pipeline won't negatively impact subsequent pipelines, and is a lesson learned from older CI tools where the agent state was persistent over time – so you never really knew if you could do a build from scratch. The problem with redoing everything every time, though, is that it's slow. GitLab improves upon this by using caches and shared artifacts, but there's only so far that approach can take you.\n\nBazel is a good example of tackling things in a different way – it speeds up builds by only rebuilding what is necessary. On the surface, this might sound a lot like just having a cache and doing an incremental build. But the main difference is that Bazel is really good at not only being fast, but also [correct](https://docs.bazel.build/versions/3.4.0/guide.html#correct-incremental-rebuilds). Bazel is much more reliable than traditional `Makefiles` or build scripts, which are notorious for occasionally forcing you to `make clean` because they get into some inconsistent state they can't recover from.\n\nAs of now, Bazel supports building Java, C, C++, Python, and Objective-C, and can also produce packages for deployment on Android or iOS. More capabilities are being added all the time, as well as open source rule sets for other languages like Go, Scala, and many more, so be sure to check their latest [product overview](https://docs.bazel.build/versions/3.4.0/bazel-overview.html) for updates.\n\n## Setting up Bazel builds in GitLab CI\n\nSetting up Bazel for builds is very straightforward. 
A job like the following does everything you need:\n\n```yaml\nvariables:\n  BAZEL_DIGEST_VERSION: \"f670e9aec235aa23a5f068566352c5850a67eb93de8d7a2350240c68fcec3b25\" # Bazel 3.4.1\n\nbuild:\n  image:\n    name: gcr.io/cloud-marketplace-containers/google/bazel@sha256:$BAZEL_DIGEST_VERSION\n    entrypoint: [\"\"]\n  stage: build\n  script:\n    - bazel --output_base output build //main/...\n  artifacts:\n    paths:\n      - bazel-bin/main/hello-world\n  cache:\n    key: $BAZEL_DIGEST_VERSION\n    paths:\n      - output\n```\n\nThis script defines a job called `build`, which uses the official Google Bazel image. We track the digest version for two reasons: First, to ensure immutability (tags can be updated), and second, to use it as a cache key so that the cache is invalidated whenever we upgrade the Bazel version. We also override the entry point because we want to pass our own parameters to our `bazel` invocation. The second parameter is the [label](https://docs.bazel.build/versions/master/glossary.html#label) of the [target](https://docs.bazel.build/versions/master/glossary.html#target) we want to build. A [target pattern](https://docs.bazel.build/versions/master/glossary.html#target-pattern) can also be used here to tell Bazel to build multiple things (and what they depend on), rather than one thing (and what it depends on).\n\nThe first parameter (`--output_base output`) is to help Bazel work with a security feature of the GitLab runner. By default, the runner will [not access files outside of the build dir](https://docs.gitlab.com/ee/ci/yaml/#artifactspaths), but Bazel places its own cache outside by default. This parameter tells Bazel to place it inside, where the runner can access it. The next two sections (`artifacts` and `cache`) tell the runner where the output file you want to keep is, and importantly for Bazel, where the cache is that you want to persist. Note that until [this issue to allow for traversing symlinks](https://gitlab.com/gitlab-org/gitlab/-/issues/19746) is resolved, you must give the full path to the specific outputs you want to keep within the `bazel-bin` folder.\n\nWhen this job runs, it places the current cache (if it exists, and only for the current `BAZEL_DIGEST_VERSION`) in the `output` folder, and then runs `bazel` to build the `main:hello-world` target. It saves the artifact from `bazel-bin/main/hello-world`, and then caches everything in `output` for the next run.\n\n### Bazel: notes on caching\n\nIn this example we've set up Bazel to work with GitLab caching, and this is how we currently use it internally. If you already have a Bazel remote cache (or even better, Bazel remote execution), there is no need to set up the GitLab CI cache: It actually would likely make things slower, since in that case there is no need to download and unpack the cache at all. Setting up remote caching or remote execution is more advanced and outside the scope of this article, but both are even better ways to speed up the build. Until then, using a GitLab cache can be a good interim step. If you're interested in learning more about remote cache/remote execution, this [BazelCon video](https://www.youtube.com/watch?v=MyuJRUwT5LI&t=1017s) or Bazel's official [documentation on remote caching](https://docs.bazel.build/versions/master/remote-caching.html) may be helpful.\n\n## Building and testing with Bazel\n\nUsing Bazel to run your tests is just as easy, and there are nice benefits to doing so. 
If you can rely on accurately knowing what has changed, you can be more selective in doing incremental tests and have the confidence that tests that were skipped were truly unnecessary. This is also quite easy to set up using Bazel, but one thing to consider is that running builds and tests all at once (rather than splitting build and test into different jobs) is going to be more efficient. You can do that by using a build job that looks like this:\n\n```yaml\nvariables:\n  BAZEL_DIGEST_VERSION: \"f670e9aec235aa23a5f068566352c5850a67eb93de8d7a2350240c68fcec3b25\" # 3.4.1\n\nbuild:\n  image:\n    name: gcr.io/cloud-marketplace-containers/google/bazel@sha256:$BAZEL_DIGEST_VERSION\n    entrypoint: [\"\"]\n  stage: build\n  script:\n    - bazel --output_base output test //main/...\n  artifacts:\n    paths:\n      - bazel-bin/main/hello-world\n  cache:\n    key: $BAZEL_DIGEST_VERSION\n    paths:\n      - output\n```\n\nIn a build that includes all tests, you typically want to run everything that changed. That's usually done using an invocation like `bazel test //main/...` which:\n\n1. Finds all targets (referred to as `...`) in the workspace location (`//` denotes the root of the [workspace](https://docs.bazel.build/versions/master/glossary.html#workspace), so we are referring to `main` relative to the root). Note that you probably don't want to include a bare `//` (without `main`), since that would include the custom `output` folder, and that is probably not what you intended.\n1. Builds the regular targets.\n1. Builds the test targets.\n1. Runs the test targets.\n\nUsing only the `test` parameter works because `bazel test` not only runs tests, but also builds everything that matched the target pattern by default. Individual targets can be excluded from being matched by `...` by applying a `manual` tag to them ([see `tags` in the Bazel glossary table](https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes)). One callout - in the example project we're building ([details below](#examples)), there actually aren't any tests, so this fails because we requested a test pass and there weren't any. If your project has tests in it, it will work fine.\n\n## Examples using Bazel\n\nWe're actually using Bazel here at GitLab to build our [GitLab Agent for Kubernetes](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent). If you're interested in seeing a more complex, complete implementation using Bazel, then that's a great one to explore. The simple example from this blog can be found live in [my own personal project](https://gitlab.com/jyavorska/testbazel), and it is based on the [stage three build tutorial](https://docs.bazel.build/versions/3.4.0/tutorial/cpp.html) from Bazel's own documentation.\n\nBazel itself is also highly configurable through its own `.bazelrc`, `BUILD` files, and more. The [user documentation for Bazel](https://docs.bazel.build/versions/master/guide.html) contains several examples along with an exhaustive configuration reference.\n\n## What's next with Bazel?\n\nWe are considering using Bazel in a few more areas within GitLab:\n\n- In an ideal world, after a minor change, the build and test should only take a few seconds to complete. When the jobs are fast enough, it could even be triggered via an editor on every change before being committed to git at all. This kind of capability could be integrated with the Web IDE, giving you immediate insight into the results of your change. 
We have an issue related to [making it easier to run pipelines from the Web IDE](https://gitlab.com/gitlab-org/gitlab/-/issues/213604) that could take advantage of this.\n- By default, GitLab uses [a gem we created](https://gitlab.com/gitlab-org/ci-cd/test_file_finder/) (which is available in this [template](https://docs.gitlab.com/ee/ci/testing/fail_fast_testing.html)) for test execution optimization, but all we're doing so far is running the riskiest tests first. As Bazel grows and adds support for more languages, it could potentially become a standard for this purpose, allowing you to run even fewer tests (and among those, the riskiest ones first). We have an [epic](https://gitlab.com/groups/gitlab-org/-/epics/4121) where you can track progress toward this idea.\n- Finally, Bazel also supports distributed builds and caching, opening the door to autoscaling compilation and test capacity alongside runner capacity, or even sharing the same capacity for whatever jobs are needed at a given moment. This function would require managing your own capacity for this purpose, but in the future we could imagine this being added to GitLab. We have an [issue for exploring different ways Bazel could support distributed jobs](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26663) using the GitLab Runner.\n\n## Tell us your Bazel success stories\n\nAre you using Bazel with GitLab CI? We'd love your feedback on what features we could add to make things work better, and to hear about the performance gains you've found from the combo. Please let us know in the Meta issue below, or contact [Jason Yavorska](https://twitter.com/j4yav) on Twitter.\n\n## Related content\n\n- [Bazel website](https://bazel.build/)\n- [Meta issue for deeper integration in GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/201484)\n- [Bazel blog on integrating it with CI systems](https://blog.bazel.build/2016/01/27/continuous-integration.html)\n- [GitLab CI quick start](https://docs.gitlab.com/ee/ci/quick_start/)\n\nCover image by [Lucas van Oort](https://unsplash.com/@switch_dtp_fotografie) on [Unsplash](https://unsplash.com)\n{: .note}\n",[110,232,726],{"slug":4950,"featured":6,"template":678},"using-bazel-to-speed-up-gitlab-ci-builds","content:en-us:blog:using-bazel-to-speed-up-gitlab-ci-builds.yml","Using Bazel To Speed Up Gitlab Ci Builds","en-us/blog/using-bazel-to-speed-up-gitlab-ci-builds.yml","en-us/blog/using-bazel-to-speed-up-gitlab-ci-builds",{"_path":4956,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4957,"content":4963,"config":4969,"_id":4971,"_type":16,"title":4972,"_source":17,"_file":4973,"_stem":4974,"_extension":20},"/en-us/blog/measuring-engineering-productivity-at-gitlab",{"title":4958,"description":4959,"ogTitle":4958,"ogDescription":4959,"noIndex":6,"ogImage":4960,"ogUrl":4961,"ogSiteName":692,"ogType":693,"canonicalUrls":4961,"schema":4962},"How we measure engineering productivity at GitLab","Learn about how we measure and iterate through this metric","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681533/Blog/Hero%20Images/background.jpg","https://about.gitlab.com/blog/measuring-engineering-productivity-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we measure engineering productivity at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Clement Ho\"}],\n        \"datePublished\": \"2020-08-27\",\n      
}",{"title":4958,"description":4959,"authors":4964,"heroImage":4960,"date":4966,"body":4967,"category":14,"tags":4968},[4965],"Clement Ho","2020-08-27","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-09-02.\n{: .alert .alert-info .note}\n\nOne of the challenges in a rapidly growing engineering organization is determining how your organization's productivity scales over time. Companies that grow quickly often face a slowdown in output because of inefficiencies and communication challenges. For example, a task that you used to be able to ask another coworker to do may now need a comprehensive approval flow.\n\nAt GitLab, we went from 100 to 280 engineers in 1.5 years. As a startup, it was critical that we continued our momentum of:\n\n![Shipping monthly releases => Provide more value to users => Increasing revenue](https://about.gitlab.com/images/blogimages/measuring-engineering-productivity/momentum.png){: .shadow.center}\n\nAs a result, we created several [Key Performance Indicators](/company/kpis/#what-are-kpis) (KPIs) and Performance Indicators (PIs) around this:\n\n- [Throughput](/handbook/engineering/development/performance-indicators/#throughput)\n- [Product MRs Review to Merge time (RTMT)](/handbook/engineering/development/performance-indicators/#review-to-merge-time-rtmt)\n- [Development Department Member MR Rate](/handbook/engineering/development/performance-indicators/#development-department-member-mr-rate)\n- [Say Do Ratio](/handbook/engineering/development/performance-indicators/#say-do-ratios)\n- [Product MRs by Type](/handbook/engineering/development/performance-indicators/#product-mrs-by-type)\n\nThe primary one that is often discussed in engineering leadership at GitLab is [Merge Request (MR)](/solutions/continuous-integration/) Rate.\n\nIn this blog post, I'll take a deep dive into how we measure engineering productivity at GitLab using MR Rate, the challenges we've encountered, and what we do to increase this metric. I hope that through this, you'll have a deeper understanding of how we operate at GitLab, and that it inspires you to reflect on how your organization measures engineering productivity.\n\n## What is MR Rate?\n\n![MR Rate = (Total MRs for a team in a given month)/(number of team members employed during that month)](https://about.gitlab.com/images/blogimages/measuring-engineering-productivity/mr-rate-formula.jpeg){: .shadow.center}\n\n**Note:** We include management roles in the team count because we want this metric to be a team metric and want managers to be accountable for their team's metric.\n\nFor example, a team of 10 people (including its manager) that merges 80 MRs in a month has an MR Rate of 8.\n\nWe use this metric because:\n\n1. We want to incentivize everyone to [iterate](https://handbook.gitlab.com/handbook/values/#iteration) and break down work into smaller MRs because smaller MRs have a faster review time and get merged faster (better developer and maintainer review experience)\n1. The quicker we can deliver features to users, the faster we can iterate upon them\n1. Every MR into the codebase improves the codebase, and every improvement has the downstream effect of making the product better\n\nWhen viewed at an organization level, this metric helps us understand how productivity in the organization changes over time. Although this metric seems simple, it actually requires a lot of detailed analysis as there are many situations to examine:\n\n- New team vs. 
established team\n- Team performance issues (blocking work or incorrect iteration work breakdown)\n- Individual growth (and performance management)\n- [Community contributions](/handbook/marketing/developer-relations/contributor-success/) vs. independent team contributions\n- Operational productivity constraints\n\nAt first, we measured MRs based on labels associated with the product domain (which generally maps to an existing engineering team). As an open core company, this allowed us to easily aggregate community contributions into the metric. We wanted to account for them because we want to continue encouraging team members to support community contribution MRs, and these MRs continue to provide the product with more value to users.\n\nUnfortunately, as our organization grew over time, this metric became confusing. Although we had a bot that would label MRs, we occasionally had bad data and mislabeled MRs. In addition, certain teams with product areas that were more mature had more community contributions than others. The combination of these issues led the metric to evolve into multiple types.\n\n- MR Rate measured through labeling\n- Team MR Rate measured through MR authorship (also known as Narrow MR Rate)\n\nIt's likely that over time this may continue to evolve, but for now, these new types of MR Rate have brought more clarity within our organization.\n\n## What are the challenges with MR Rate?\n\nThere are many challenges, but we'll highlight a few notable ones.\n\nFirst of all, one metric never tells the full story. One of the challenges we faced as we hyper-focused on this metric was being biased toward the number given by the metric rather than truly understanding the story surrounding it. For example, a team with a high MR Rate could be shipping quantity over quality. By the MR Rate measurement alone, the organization could unintentionally hold up teams with unstable features as examples.\n\nIn order to avoid these types of situations, we first ensure that we clearly define our [Definition of Done](https://docs.gitlab.com/ee/development/contributing/merge_request_workflow.html#definition-of-done) and our [maintainer](/handbook/engineering/workflow/code-review/#maintainer) [review process](https://docs.gitlab.com/ee/development/code_review.html). This allows us to set a baseline for quality so that we can set clear expectations in the organization and create clear guidance when MRs are below our standards for quality.\n\nIn addition, we also use other metrics to get a fuller understanding of the story, and we regularly introspect on our numbers. We intentionally accompany MR Rate with a few other metrics such as [Product MRs by Type](/handbook/engineering/development/performance-indicators/#product-mrs-by-type) to better understand the distribution of MRs and [Say Do Ratio](/handbook/engineering/development/performance-indicators/#say-do-ratios) (this is our latest addition, we're still iterating on it) to better understand how the teams are performing relative to what they committed with product management during the development milestone. We generally use MR Rate to observe trends and regularly ask ourselves, “why is this trending down?” as well as “why is this trending up so much? Is there something that this team is doing that other teams can learn from?” These are some techniques we use to keep ourselves accountable for understanding the broader picture of the metric.\n\nAnother challenge we faced with MR Rate is balancing its use as a team metric vs. an individual metric. 
As an organization, we want MR Rate to trend upwards over time, and we want to hold engineering leaders accountable for their teams. Engineering directors are responsible for their sub-department's metrics, and engineering managers are responsible for their team's metrics.\n\nWe intentionally chose not to make MR Rate an individual metric because we do not want to encourage siloed, non-collaborative behavior. For example, we do not want a team member to feel disincentivized to review other team members' MRs or unblock others. This is especially important because [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) is a company value. Although actions such as making an MR Rate leaderboard could potentially increase the metric for the organization, we have intentionally chosen not to do that because we want to encourage collaboration. We also chose not to use MR Rate as a metric for a team member's underperformance.\n\nThis conscious decision is tricky (especially for smaller teams) because it can be rather difficult for engineering managers to increase their team's MR Rate trends without discussing individual metrics. When teams have fewer members, each member's total MRs in a month has a bigger impact on the team's overall MR Rate than it would on a larger team. Different teams have attempted to address this in different ways, which we will explain in the next section.\n\n## How do we increase MR Rate?\n\nWe use four primary strategies to increase MR Rate.\n\n1. Improving iteration\n1. Setting KPIs\n1. Setting goals (OKRs) to increase the KPI\n1. Empowering teams to improve efficiencies\n\nImproving iteration is our primary strategy because team members who are better at iterating are able to create smaller MRs, which results in a higher MR Rate. In our experience, iteration is easy to conceptualize but difficult to apply. Our organization put together some resources (including a [training template](https://gitlab.com/gitlab-com/Product/-/blob/master/.gitlab/issue_templates/iteration-training.md)), and our CEO has set up Iteration Office Hours as an opportunity to coach (most of which are also available publicly on [YouTube](https://www.youtube.com/c/GitLabUnfiltered/search?query=iteration+office+hours+with)).\n\nFrom an organizational perspective, we use KPIs to monitor our MR Rate. Our organization tracks our [Development Department Narrow MR Rate](/handbook/engineering/development/performance-indicators/#development-department-narrow-mr-rate) as our primary KPI with a description, a chart with current and historical data, and a predefined target. As of writing this article, our target is 10, and we are trending toward that target over time.\n\n![Development Department Narrow MR Rate](https://about.gitlab.com/images/blogimages/measuring-engineering-productivity/dept-mr-rate.png){: .shadow.center}\n\n_KPI chart as of August 24, 2020_\n\nEach sub-department under the development department also has its dashboards available publicly (though these dashboards are not as organized and easy to find as the KPI). For example, the Ops sub-department tracks this on its specific [handbook page](/handbook/engineering/development/ops/#ops-sub-department-performance-indicators). We are currently working on consolidating these charts. 
These KPI dashboards make it easy to understand how the organization is performing and allow us to keep it top of mind.\n\nIn addition to KPIs, each fiscal quarter, engineering management uses these indicators to determine how to set OKRs. In previous quarters, OKRs were set to raise MR Rate to higher targets. This quarter's goal, in light of COVID's long-lasting implications, is to maintain the target, because we understand that the current situation is affecting everyone differently. OKRs help align the organization toward the same goals so that everyone understands and can contribute to these goals.\n\nFrom a team perspective, we also empower our engineering managers to experiment with processes to improve efficiency while staying mindful of maintaining a healthy work-life balance. Some engineering managers choose to use individual MR Rate values as a means of coaching and understanding more about each team member's merge requests. For example, a team member may have a lower MR Rate because he/she is a maintainer and, because of the number of MR reviews received, is unable to complete as many MRs as he/she otherwise could. Some teams also look through their team's MR Rate on a weekly basis and provide commentary to their directors as a means of understanding more about the metric in order to improve it over time.\n\n## Recap\n\nThe MR Rate is how we've chosen to measure and increase engineering productivity at GitLab. It's not perfect, but we're constantly iterating to make it better. We have yet to determine what our ceiling is or whether we've already reached it, but we will definitely share with the wider community when we get to that point. What metrics do you use to measure your organization's engineering productivity? Do you have suggestions or comments about MR Rate? 
Leave a comment below, and we'll read through them and do our best to respond.\n\n## Special thanks\n\nThanks to the following engineering leaders at GitLab who opened up their calendars to share their insights on this topic:\n\n- [Eric Johnson](/company/team/#edjdev), executive vice president of Engineering\n- [Christopher Lefelhocz](/company/team/#clefelhocz1), vice president of Development\n- [Wayne Haber](/company/team/#whaber), director of Engineering, Threat Management\n- [Sam Goldstein](/company/team/#sgoldstein), director of Engineering, Ops\n- [Tim Zallmann](/company/team/#timzallmann), director of Engineering, Dev\n- [Chun Du](/company/team/#cdu1), director of Engineering, Enablement\n- [Bartek Marnane](/company/team/#bmarnane), director of Engineering, Growth\n- [Todd Stadelhofer](/company/team/#tstadelhofer), director of Engineering, Secure\n- [Darby Frey](/company/team/#darbyfrey), senior manager, Engineering, Verify\n- [Daniel Croft](/company/team/#dcroft), senior manager, Engineering, Package and Release\n\nCover image by [Frank Mckenna](https://unsplash.com/@frankiefoto) on [Unsplash](https://unsplash.com/photos/4V8JxijgZ_c)\n{: .note}\n",[915],{"slug":4970,"featured":6,"template":678},"measuring-engineering-productivity-at-gitlab","content:en-us:blog:measuring-engineering-productivity-at-gitlab.yml","Measuring Engineering Productivity At Gitlab","en-us/blog/measuring-engineering-productivity-at-gitlab.yml","en-us/blog/measuring-engineering-productivity-at-gitlab",{"_path":4976,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4977,"content":4983,"config":4990,"_id":4992,"_type":16,"title":4993,"_source":17,"_file":4994,"_stem":4995,"_extension":20},"/en-us/blog/align-engineering-security-appsec-tests-in-ci",{"title":4978,"description":4979,"ogTitle":4978,"ogDescription":4979,"noIndex":6,"ogImage":4980,"ogUrl":4981,"ogSiteName":692,"ogType":693,"canonicalUrls":4981,"schema":4982},"How Developer-Centric AppSec Testing Transforms DevOps Teams","Find and fix security bugs faster by implementing developer-centric application security testing in the CI pipeline. And the bonus? Engineering and security will finally be better aligned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681513/Blog/Hero%20Images/stackhawk.jpg","https://about.gitlab.com/blog/align-engineering-security-appsec-tests-in-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How developer-centric AppSec testing can dramatically change your DevOps team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joni Klippert\"}],\n        \"datePublished\": \"2020-08-21\",\n      }",{"title":4984,"description":4979,"authors":4985,"heroImage":4980,"date":4987,"body":4988,"category":14,"tags":4989},"How developer-centric AppSec testing can dramatically change your DevOps team",[4986],"Joni Klippert","2020-08-21","\n\nSoftware development has accelerated dramatically over the past decade. As [DevOps](/topics/devops/) became pervasive, companies went from shipping software monthly to shipping software to production frequently throughout the day. This happened as engineering teams took ownership of the deployment, performance, and resilience of their software. \n\nAnd it has paid off. Companies that have adopted DevOps are deploying software significantly faster, ultimately driving business value as innovation is more rapidly delivered to customers.\n\nSecurity, however, did not keep up. 
Security teams typically fell into one of two positions - the blocker of frequent deployments or the team perpetually bringing up issues in last month’s work. The need for a shift in the security model is widely known. It was the subject of the [2019 Black Hat Conference keynote](https://www.blackhat.com/us-19/briefings/schedule/index.html#every-security-team-is-a-software-team-now-17280), stats from GitLab’s [2020 Global DevSecOps Survey](https://about.gitlab.com/resources/downloads/2020-devsecops-report.pdf) make this obvious, and we’ve [shared our opinions](https://www.stackhawk.com/blog/application-security-is-broken/) at StackHawk.\n\nI believe there is a solution (or at least a *huge* step in the right direction)... developer-centric [application security](/topics/devsecops/) tooling in the CI pipeline.\n\n## The CI pipeline aligns engineering and security\n\nWhile some in the industry have been debating the term DevSecOps, leading companies have started adopting developer-first security tooling that brings alignment through the CI pipeline. Instrumented correctly, it ensures that security bugs are caught before they hit production and that the fix cycle is drastically shortened.\n\nThe legacy model has security teams running application security tests against production environments. These sorts of checks are great if they are your backstop. But if this is the primary way of assessing your application’s security posture, you need to catch up with modern engineering practices. \n\nModern teams are running checks on each microservice that makes up the customer facing application, catching bugs in the pipeline, and equipping developers with the information to self-serve fixes and triage issues. Fix times are significantly shorter, as developers are still in the context of the code they were working on. By testing microservices vs. the end-state application, the underlying bugs are much easier to find and fix. And with developer-centric tooling, developers can fix bugs themselves instead of cycling through siloed internal processes. This structure better aligns each function with their best skill sets. Engineers know the application the best and are best equipped to fix issues, and security teams are able to focus on strategy instead of Jira ticket creation.\n\nThe key is to get the instrumentation right (read: don’t break the build for stupid stuff).\n\n## Application security tests in CI\n\nThat sounds great in theory, but what does it look like in practice? Getting started is actually simpler than it seems. We suggest adding three application security tests to start:\n\n## Software composition analysis (SCA)\n\nSCA identifies the open source dependencies in your code base and compares that against a database of known security vulnerabilities. Some tools automatically create pull requests to patch outdated libraries. Open source use is growing exponentially, especially with chained dependencies. SCA is incredibly important, but also can be noisy with non-exploitable findings.\n\nSome of the leading vendors in the space are [GitLab](/) and [Snyk](https://snyk.io/), with up-and-comers like [FOSSA](https://fossa.com/) also worth paying attention to.\n\n## Dynamic application security testing (DAST)\n\nDAST runs security tests against your running application, from localhost to CI to production. The beauty of DAST is that it most closely resembles what an attacker would see, by attacking your running application and reducing false positives. 
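\n\nAs a concrete illustration, wiring a DAST job into a GitLab pipeline can be as small as the following sketch, which assumes GitLab's bundled DAST CI template (an Ultimate-tier feature); the target URL is a placeholder for your own staging or review-app environment:\n\n```yaml\n# A minimal sketch using GitLab's bundled DAST template; DAST_WEBSITE is a\n# placeholder - point it at a staging or review-app URL, never production.\ninclude:\n  - template: DAST.gitlab-ci.yml\n\nvariables:\n  DAST_WEBSITE: \"https://staging.example.com\"\n```\n\n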
The two things to be sure of as you start testing with DAST are that your scanner is finding all of your paths and API endpoints and that it is able to scan as an authenticated user.\n\nGitLab provides DAST checks for Ultimate tier customers. If you want more robust scanning options and additional functionality to manage and fix bugs, [StackHawk](https://www.stackhawk.com) is the only place to turn (obviously I’m biased here). Other solutions include legacy vendors such as [Rapid7](https://www.rapid7.com/) or open source leader [ZAP](https://www.zaproxy.org/).\n\n## Secrets detection\n\nFinally, you’ll want to ensure that you have detection for leaked secrets in code. This tooling looks for credentials, keys, or other secrets that may have unintentionally been committed to the code base by developers. GitLab includes [secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) in their GitLab Ultimate security tooling.\n\n## Getting started\n\nOftentimes, the thought of adding application security tests to the development workflow feels insurmountable. With a long list of priorities, engineering leadership will sometimes put this off. The reality, however, is that it is not that hard.\n\nAt StackHawk, we see many customers completing their first successful scans within 15 minutes of sign up, and instrumentation in CI is literally as easy as adding [a few lines of YAML](https://docs.stackhawk.com/continuous-integration/) to your build.\n\nHere is our recommended playbook of how to get started with AppSec in CI. While this is specific to StackHawk, the principles can be applied to other tools as well.\n\n### Step 1: local testing and config\nAfter signing up and grabbing your API key, start iterating on [configuration](https://docs.stackhawk.com/hawkscan/configuration/) while testing against your application on localhost. This allows you to quickly adjust config and get successful authenticated scans running.\n\n### Step 2: non-blocking CI instrumentation\nAfter you’ve ironed out the configuration locally, add the test to your CI pipeline. At this point, it is strongly recommended to instrument as a non-blocking test so that you can triage any existing findings and smooth out any kinks.\n\n### Step 3: bug triage - fix critical issues in flight, backlog and discuss the rest\nAfter your first non-blocking CI run, start triaging any initial findings. Any bugs marked as High criticality should likely be fixed with some sense of urgency. Lows and Mediums should be triaged depending on your application and the bugs, either quickly addressed or added to a backlog for review. Existing findings should not block you from instrumenting checks to ensure that new bugs don’t get shipped to production.\n\n### Step 4: switch to blocking tests\nAfter ironing out config locally and in CI, and then triaging initial findings, it is time to finalize the rollout. Switch the StackHawk test to blocking mode to ensure that new security bugs don’t hit production. You can set the scanner to break on High or on Medium and High, depending on your business and the nature of the application. With this in place, you can be confident that production-ready applications have been scanned for security.\n\n## Cultural shifts: it is more than CI\nThe CI pipeline is the natural hinge point to start aligning engineering and security. A cultural shift, however, is absolutely needed. 
(If you're doubtful about this, here's a frank look at why [dev and sec don't get along](/blog/developer-security-divide/).) Modern engineering teams recognize that delivering a secure application is part of quality engineering. Engineers aren’t comfortable shipping applications with UI bugs, and they shouldn’t accept security holes either. \n\nSecurity, on the other hand, needs to shift from being the blocker of speedy development to being the enabler of safety in an environment of high-speed delivery. Modern security engineers are ensuring that their teams are working with safe-by-default frameworks, are equipped with developer-centric tooling, and that there are proper integration tests for business logic that can’t be tested by external tooling.\n\nWhile there is significant catch-up needed, it is encouraging to see the leading software teams out there testing application security on every build.\n\n## Dig deeper\n\nTo learn more about adding AppSec tests to your CI build, join me at my [How Security Belongs in DevOps](https://sched.co/dUWD) talk at GitLab Commit on August 26th. You can also always sign up for a [free StackHawk trial or demo](https://www.stackhawk.com) or talk to your GitLab sales representative about the security features in GitLab Ultimate. And for the best of both worlds, check out more details on running [automated security testing with StackHawk in GitLab](https://docs.stackhawk.com/continuous-integration/gitlab.html).\n\n_Joni Klippert is founder & CEO of StackHawk, a software-as-a-service company built to help developers find and fix security vulnerabilities in their code. Joni has been building software for developers for more than 10 years, previously serving as VP Product at VictorOps from seed stage to acquisition by Splunk. Joni is a Colorado native and holds an MBA from the University of Colorado. 
She currently lives in Denver with her fiancé Jason and whippet \"Q\"._\n\nCover image by [Adi Goldstein](https://unsplash.com/@adigold1) on [Unsplash](https://unsplash.com)\n{: .note}\n",[110,1347,894,1307,1328,727],{"slug":4991,"featured":6,"template":678},"align-engineering-security-appsec-tests-in-ci","content:en-us:blog:align-engineering-security-appsec-tests-in-ci.yml","Align Engineering Security Appsec Tests In Ci","en-us/blog/align-engineering-security-appsec-tests-in-ci.yml","en-us/blog/align-engineering-security-appsec-tests-in-ci",{"_path":4997,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4998,"content":5004,"config":5009,"_id":5011,"_type":16,"title":5012,"_source":17,"_file":5013,"_stem":5014,"_extension":20},"/en-us/blog/how-gitlab-protects-your-ip",{"title":4999,"description":5000,"ogTitle":4999,"ogDescription":5000,"noIndex":6,"ogImage":5001,"ogUrl":5002,"ogSiteName":692,"ogType":693,"canonicalUrls":5002,"schema":5003},"How GitLab protects your IP","There are many ways in which hosting intellectual property in GitLab is not only secure but also flexible and invites collaboration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667110/Blog/Hero%20Images/how-gitlab-protects-your-ip.jpg","https://about.gitlab.com/blog/how-gitlab-protects-your-ip","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab protects your IP\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jordi Mon\"}],\n        \"datePublished\": \"2020-08-07\",\n      }",{"title":4999,"description":5000,"authors":5005,"heroImage":5001,"date":5006,"body":5007,"category":14,"tags":5008},[4925],"2020-08-07","\n## How safe is your IP?\n\nOne of the main assets of any company is stored in the form of code. The originality of the code makes it intellectual property, and thus companies would like it to be protected. But storing it safely away from others will hinder the same effort that brought it to life: Collaboration. So how can companies keep their IP safe while allowing their employees to work on its maintenance and development?\n\nGitLab repos, whether hosted online or privately, store one of the most valuable things your company is able to create: The digital assets used to build software products and services. GitLab is designed to make versioning and the collaboration over those assets as seamless and productive as possible.\n\nThat said, is GitLab a safe place to store such valuable assets?\n\nLet's explore user access within GitLab and to what extent these users can access your company's IP.\n\n## Ways to access GitLab\n\n### LDAP, Active Directory, SAML, SSO\n\nFor the self-managed solution, GitLab is able to connect to any Lightweight Directory Access Protocol (LDAP) service that is already set up and validate which users have access permissions. Users that access GitLab instances with LDAP on will have access only to the groups and projects assigned to them. The same applies to Active Directory.\n\nIf you are using GitLab.com, Security Assertion Markup Language (SAML) technology will mostly do the same as described above. System for Cross-domain Identity Management (SCIM), the open standard running beneath SAML, is currently supported for Okta and Azure but will have broader support in the future. 
For example, check single sign-on (SSO) for enterprises or the general direction of this category.\n\n## How users are organized in GitLab\n\nAssigning roles with permissions is an easy way to know which user will be able to access and make changes to the IP.\n\nThere are six roles: Guest, Auditor, Reporter, Developer, Maintainer, and Owner.\n\nBy default all users have the following permissions in a project:\n\n* Create issues\n* Leave comments\n* Clone or download the project code\n\nBut these are the specific definitions for each user role:\n\n1. **Guests** are not active contributors in private projects. They can only view issues and leave comments on them.\n1. **Auditors** are given read-only access to all projects, groups, and other resources on the GitLab instance.\n1. **Reporters** are read-only contributors: They can't write to the repository, but can write on issues.\n1. **Developers** are direct contributors, and have access to everything to go from idea to production, unless something has been explicitly restricted (e.g., through branch protection).\n1. **Maintainers** are super-developers: They can push to main (master) and deploy to production. This role is often held by maintainers and engineering managers.\n\nSo what's happening at the project level? Well, the meat of it: Collecting requirements, defining user stories, pruning and grooming the backlog, and merge requests popping up like branches. It is at the project level where these four roles interact. But they don't do it only with the permissions their role provides them. There are other features at this level that can stop them or enable them to do certain things that will allow the project owners to parcel and control who's doing what to the IP hosted in the repo. Let's look at these features.\n\n## Where is my IP stored?\n\nIntellectual property is stored in repos, projects, and groups. Let's first step back and explain what the structure of these elements in GitLab looks like. Once we have a clear understanding of what and where information is stored, we can then jump to explaining who can access what information.\n\n### Repos\n\nA repo is a folder that lives either on your machine or on GitLab.com. It is what Git tracks every time you add and commit a change. It hosts your code and all the branches.\n\n### Projects\n\nRepos are the core part of every project. This is where GitLab's core [version control and collaboration](/topics/version-control/) capabilities shine. GitLab has project management features such as epics, sub-epics, and issues, Wikis, GitLab pages, a Web IDE and many more features that make the repo the central part of a fully-featured source code workflow.\n\n### Groups\n\nGroups are a collection of projects. Members of groups with permissions will keep those permissions on every project included in the group.\n\nTwo more roles operate at this level:\n\n1. **Admins**: Admin-only features can only be found in /admin. Outside of that, admins are the same as the highest possible permission (owner).\n1. **Owners** are essentially group admins. 
Watch the video below for a deep dive into repos, projects, and groups.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/4TWfh1aKHHw\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## How is my IP protected?\n\n### Protected branches\n\n[Protected branches](https://docs.gitlab.com/ee/user/project/protected_branches.html) are a simple way to keep IP protected: Even though copies of a branch can always be made, protected branches control who can create those copies, who can push to them, and for what purpose they are created.\n\n* Protected branches (PBs) prevent everybody except users with *maintainer* permission from creating them.\n* PBs prevent pushes from everybody except users with *allowed* permission.\n* PBs prevent anyone from force pushing to the branch.\n* PBs prevent anyone from deleting the branch.\n\nThese settings allow maintainers to forbid all pushes but allow incoming merges from developers. This forces every developer who wants to change a protected branch to open an MR, which exposes the proposed changes and makes them subject to other security measures we will cover later, like push rules or MR approvals.\n\nPipeline security also builds on protected branches, and you can read more about it [here](https://docs.gitlab.com/ee/user/project/repository/push_rules.html).\n\n### Protected tags\n\nAdding [protected tags](https://docs.gitlab.com/ee/user/project/protected_tags.html) to your repo is a bit like bookmarking it. The ability to label commits allows you to add details and context to what is happening at that point in time.\nIf a tag becomes an important milestone for the project, you might as well protect it, right? That is why only *maintainers* are allowed to create tags and, if a tag is protected, no one apart from them will be able to delete or modify it.\n\n### Push rules\n\nWe use [push rules](https://docs.gitlab.com/ee/user/project/repository/push_rules.html) at GitLab, which prevent the majority of contributors from pushing directly to the main branch. We use GitLab Flow because we want to make small batch changes fast, but also because we want to collaborate with our team members. A merge request flow like GitLab Flow does not push code directly to the main branch, even though direct pushes are common when working with plain Git.\nPush rules use regular expressions to scan commit messages, branch names, or file details to prevent pushes from happening. These rules are usually used to enforce consistency throughout pushes. They allow teams to stay compliant with naming conventions, for example, or keep pushes linked to specific requirements by parsing for issue numbers. Commits that are not GPG-signed can be automatically rejected with this feature too.\nThe possibilities are endless since push rules can be [customized](https://docs.gitlab.com/ee/development/changelog.html). Learn [here](https://docs.gitlab.com/ee/push_rules/push_rules.html#enabling-push-rules) what push rules are available on each tier.\n\n
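Both protections can also be configured through the REST API. The following is a minimal sketch rather than a full setup – the project ID and token are placeholders:\n\n```bash\n# Protect the main branch so only Maintainers (access level 40) can push or merge.\ncurl --request POST --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" \\\n     \"https://gitlab.example.com/api/v4/projects/123/protected_branches?name=main&push_access_level=40&merge_access_level=40\"\n\n# Add a push rule requiring commit messages to reference an issue number\n# and rejecting commits that are not GPG-signed.\ncurl --request POST --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" \\\n     --data \"commit_message_regex=#\\\\d+\" \\\n     --data \"reject_unsigned_commits=true\" \\\n     \"https://gitlab.example.com/api/v4/projects/123/push_rule\"\n```\n\n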
### Merge request approvals\n\nA merge request (MR) is a branch and the start of a conversation. When you open an MR, you have effectively created a copy of the main branch hosted in the repo so you can make changes. Since the main branch is the IP's most valuable asset, all changes made in the opened MR [should require some extra sets of eyes on them](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/). When this feature is enabled, it enforces code review. Does this imply that all changes will be reviewed by all team members? No, this feature can be customized in many ways.\n\nFirst, you can set approval rules that define how many approvals are required for a code change to be merged. You can even designate specific approvers, such as a team lead. Designating approvers can be done in each MR or at the project level. If you know an MR may only affect the backend of the project, you might specify reviewer categories such as backend, database, QA, and so on.\n\nOne special category is security. GitLab considers the [DevSecOps](/topics/devsecops/) use case one of the fundamental trends in software development and is committed to providing the best security capabilities to software engineering teams.\nAmong other things, the ability to shift security checks left allows devs to run static analysis on code at rest, and there is a [specific MR approval](/solutions/security-compliance/) that will prevent any MR from moving forward if certain security criteria are not met.\nTypically, these SAST scans will look for security vulnerabilities and license compliance violations. Security teams can address problems that otherwise would have compounded by setting approvals to trigger when vulnerabilities or license violations are detected. DevSecOps with GitLab automates security and compliance workflows to create an adaptable process for your development and security teams to work faster and better together.\n\n### Code owners\n\nThe code owners feature assigns ownership of files or paths to a certain group or user. Generally, this measure allows the MR creator to determine who is the main stakeholder of certain files. Assigning code ownership fosters collaborative behaviors, such as asking for permission to merge or simply requesting guidance. It becomes especially useful if a question for the code owner is unrelated to a code review or an MR approval.\nCode owners can become approvers of MRs if set to do so in an approval rule. Combining code ownership with protected branches is a good way to get more granular control over certain files and the changes applied to them.\n\n
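To make this concrete, here is a minimal sketch of a `CODEOWNERS` file – the paths and group handles are invented for illustration:\n\n```bash\n# Create a CODEOWNERS file assigning ownership of sensitive paths.\n# @backend-team and @security-team are hypothetical group handles.\nmkdir -p .gitlab\ncat > .gitlab/CODEOWNERS \u003C\u003C'EOF'\n# The backend team owns all application code\n/app/ @backend-team\n# The security team owns CI configuration and dependency manifests\n.gitlab-ci.yml @security-team\nGemfile.lock @security-team\nEOF\ngit add .gitlab/CODEOWNERS && git commit -m \"Add code owners\"\n```\n\n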
## How can I trace access and changes to my IP?\n\n### Audit events\n\nThe final method for controlling the security of your IP is monitoring user activity. As in any other project management tool, users can access information in GitLab in many different ways and can interact with that information on multiple levels. The admin should be able to monitor events and stop those that do not comply with corporate policy. Access control and audit trails provide additional layers of security and traceability that will improve your IP storage compliance.\n\n## How does this all work out for me?\n\nWell, you can follow the example of Northwestern Mutual. They manage permissions as code by dedicating a complete repo to hosting and managing their groups, teams, and deploy keys. This means that when a team wants to create a project that requires new roles, new access permissions, protected branches, etc., they’ll create an MR in that repo and submit those changes to the code owner for approval. Remember, in GitLab an MR is more than just a branch; it's also the start of a conversation, or even a proposed code change. This particular proposal would imply changes to a YAML file that contains all admin-level permissions.\n\nWatch the Northwestern Mutual team describe this in detail at GitLab Commit Brooklyn 2019:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/W1YMBc6kwUE\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCover image by [Jon Moore](https://unsplash.com/@thejmoore) on [Unsplash](https://unsplash.com/photos/bBavss4ZQcA)\n{: .note}\n",[1307,1347],{"slug":5010,"featured":6,"template":678},"how-gitlab-protects-your-ip","content:en-us:blog:how-gitlab-protects-your-ip.yml","How Gitlab Protects Your Ip","en-us/blog/how-gitlab-protects-your-ip.yml","en-us/blog/how-gitlab-protects-your-ip",{"_path":5016,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5017,"content":5023,"config":5029,"_id":5031,"_type":16,"title":5032,"_source":17,"_file":5033,"_stem":5034,"_extension":20},"/en-us/blog/how-gitlab-pages-uses-the-gitlab-api",{"title":5018,"description":5019,"ogTitle":5018,"ogDescription":5019,"noIndex":6,"ogImage":5020,"ogUrl":5021,"ogSiteName":692,"ogType":693,"canonicalUrls":5021,"schema":5022},"How GitLab Pages uses the GitLab API to serve content","GitLab Pages is changing the way it reads a project's configuration to speed up booting times and slowly remove its dependency on NFS.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679634/Blog/Hero%20Images/retrosupply-jLwVAUtLOAQ-unsplash.jpg","https://about.gitlab.com/blog/how-gitlab-pages-uses-the-gitlab-api-to-serve-content","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab Pages uses the GitLab API to serve content\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jaime Martínez\"}],\n        \"datePublished\": \"2020-08-03\",\n      }",{"title":5018,"description":5019,"authors":5024,"heroImage":5020,"date":5026,"body":5027,"category":14,"tags":5028},[5025],"Jaime Martínez","2020-08-03","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-11-13.\n{: .alert .alert-info .note}\n\n[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) allows you to create and host GitLab project websites from a user account or group for free on [GitLab.com](https://www.gitlab.com/) or on your self-managed GitLab instance.\n\nIn this post, I will explain how the [GitLab Pages daemon](https://gitlab.com/gitlab-org/gitlab-pages) obtains a domain's configuration using the GitLab API, specifically on [GitLab.com](https://www.gitlab.com/).\n\n## How does GitLab Pages know where to find your website files?\n\nGitLab Pages will soon use object storage to store the contents of your website. You can follow the development of this new feature [here](https://gitlab.com/groups/gitlab-org/-/epics/3901).\n\nCurrently, GitLab Pages uses an NFS shared mount drive to store the contents of your website.\nYou can define the value of this path by setting [`pages_path`](https://docs.gitlab.com/ee/administration/pages/#change-storage-path) in your `/etc/gitlab/gitlab.rb` file.\n\nWhen you deploy a website using the `pages:` keyword in your `.gitlab-ci.yml` file, a `public` path artifact must be defined, containing the files available for your website. This `public` artifact eventually makes its way into the NFS shared mount.\n\n
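As a minimal sketch – the build script is a placeholder for whatever generates your site – a Pages deployment job can be as small as this:\n\n```bash\n# Append a minimal Pages job to .gitlab-ci.yml; the job must publish\n# a `public` path artifact for GitLab Pages to serve.\ncat >> .gitlab-ci.yml \u003C\u003C'EOF'\npages:\n  stage: deploy\n  script:\n    - ./build-site.sh public\n  artifacts:\n    paths:\n      - public\nEOF\n```\n\n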
When you deploy a website to GitLab Pages, a domain will be created based on the [custom Pages domain you have configured](https://docs.gitlab.com/ee/administration/pages/#configuration). For [GitLab.com](https://www.gitlab.com/), the Pages domain is `*.gitlab.io`; if you create a project named `myproject.gitlab.io` and enable HTTPS, a wildcard SSL certificate will be used.\nYou can also [set up a custom domain](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/) for your project, for example `myawesomedomain.com`.\n\nFor every project (aka domain) that is served by the Pages daemon, there must be a directory in the NFS shared mount that matches your domain name and holds its contents. For example, if we had a project named `myproject.gitlab.io`, the Pages daemon would look for your `.html` files under the `/path/to/shared/pages/myproject/myproject.gitlab.io/public` directory.\nThis is how GitLab Pages serves the content published by the `pages:` keyword in your CI configuration.\n\nBefore [GitLab 12.10](/releases/2020/04/22/gitlab-12-10-released/) was released, the Pages daemon would rely on a file named `config.json` located in your project's directory in the NFS shared mount, that is `/path/to/shared/pages/myproject/myproject.gitlab.io/config.json`.\nThis file contains metadata related to your project and [custom domain names](https://docs.gitlab.com/ee/user/project/pages) you may have set up.\n\n```json\n{\n  \"domains\":[\n    {\n      \"Domain\":\"myproject.gitlab.io\"\n    },\n    {\n      \"Domain\": \"mycustomdomain.com\",\n      \"Certificate\": \"--certificate contents--\",\n      \"Key\": \"--key contents--\"\n    }\n  ],\n  \"id\":123,\n  \"access_control\":true,\n  \"https_only\":true\n}\n```\n\nGitLab Pages has been a very popular addition to GitLab, and the number of hosted websites on GitLab.com has increased over time. We are currently hosting over 251,000 websites!\nOn start-up, the Pages daemon would [traverse all directories](https://gitlab.com/gitlab-org/gitlab-pages/-/blob/v1.21.0/app.go#L448) in the NFS shared mount and load the configuration of all the deployed Pages projects into memory.\nBefore 09-19-2019, the Pages daemon would take [approximately 25 minutes to be ready to serve requests](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/252) per instance on GitLab.com.\nAfter upgrading GitLab Pages to version [`v1.9.0`](https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/185), improvements in some dependencies reduced booting time to approximately five minutes. 
This was great, but not ideal.\n\n## GitLab API-based configuration\n\nAPI-based configuration was [introduced](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/282) in GitLab 12.10.\nWith API-based configuration, the daemon starts serving content just a few seconds after booting.\nFor example, a particular Pages node on GitLab.com is usually ready to serve content within one minute of starting.\n\nOn [GitLab.com](https://www.gitlab.com/), the Pages daemon now sources the domain configuration via an internal API endpoint, `/api/v4/internal/pages?domain=myproject.gitlab.io`.\nThis is done on demand per domain, and the configuration is cached in memory for a certain period of time to speed up serving content from that Pages node.\n\nThe response from the API is very similar to the contents of the `config.json` file:\n\n```json\n{\n    \"certificate\": \"--cert-contents--\",\n    \"key\": \"--key-contents--\",\n    \"lookup_paths\": [\n        {\n            \"access_control\": true,\n            \"https_only\": true,\n            \"prefix\": \"/\",\n            \"project_id\": 123,\n            \"source\": {\n                \"path\": \"myproject/myproject.gitlab.io/public/\",\n                \"type\": \"file\"\n            }\n        }\n    ]\n}\n```\n\nYou can see that the source type is `file`. This means that the Pages daemon will still serve the contents from the NFS shared mount. We are actively working on removing the NFS dependency from GitLab Pages by [updating the GitLab Pages architecture](https://gitlab.com/groups/gitlab-org/-/epics/1316).\n\nWe are planning to [transition GitLab Pages to object storage instead of NFS](https://gitlab.com/groups/gitlab-org/-/epics/3901). This will essentially [enable GitLab Pages to run on Kubernetes](https://gitlab.com/gitlab-org/gitlab/-/issues/39586) in the future.\n\n**Update**:\nWe have now [rolled out the zip source type on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/2808). This behavior is behind a feature flag and it's not the final implementation.\nAs of 10-22-2020 we serve about 75% of Pages projects from zip and object storage, and we're getting closer to removing the NFS dependency!\n\n## Self-managed GitLab instances\n\nThe changes to the GitLab Pages architecture were piloted on GitLab.com, which is possibly the largest GitLab Pages implementation.\nOnce all the changes supporting the move to an API-based configuration are complete, they will be rolled out to self-managed customers.\nYou can find more details and the issues we faced while rolling out API-based configuration in this [issue](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/282).\n\nIf you can't wait to speed up your Pages nodes' startup, we have a potential guide in this [issue description](https://gitlab.com/gitlab-org/gitlab/-/issues/28298#potential-workaround) which explains how we enabled the API on GitLab.com. 
However, this method will be removed in the near future.\n\n**Update**:\nYou can now enable API-based configuration by following [this guide](https://docs.gitlab.com/ee/administration/pages/#gitlab-api-based-configuration).\n\n## Domain source configuration and API status\n\nIn the meantime, we are working toward adding [a new configuration flag for GitLab Pages](https://gitlab.com/gitlab-org/gitlab/-/issues/217912) which will allow you to choose the domain configuration source by specifying `domain_config_source` in your `/etc/gitlab/gitlab.rb` file.\nBy default, GitLab Pages will use the `disk` configuration source, the same way it is used today.\n\n
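On an Omnibus installation, opting in might look like the sketch below. This assumes the flag lands as a `gitlab_pages` setting named after `domain_config_source`, so treat it as illustrative rather than final syntax:\n\n```bash\n# Hypothetical illustration: switch the Pages domain configuration\n# source from disk to the GitLab API, then reconfigure.\necho \"gitlab_pages['domain_config_source'] = \\\"gitlab\\\"\" | sudo tee -a /etc/gitlab/gitlab.rb\nsudo gitlab-ctl reconfigure\n```\n\n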
In the background, the Pages daemon will start [checking the API status](https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/304) by calling the `/api/v4/internal/pages/status` endpoint. This will help you check whether the Pages daemon is ready to talk to the GitLab API, especially when you are [running Pages on a separate server](https://docs.gitlab.com/ee/administration/pages/#running-gitlab-pages-on-a-separate-server).\n\nPlease check the [GitLab Pages administration guide](https://docs.gitlab.com/ee/administration/pages/#troubleshooting) for further troubleshooting.\n\nCover image by [@RetroSupply](https://unsplash.com/@retrosupply) on [Unsplash](https://unsplash.com/photos/jLwVAUtLOAQ)\n{: .note}\n",[915,1002],{"slug":5030,"featured":6,"template":678},"how-gitlab-pages-uses-the-gitlab-api","content:en-us:blog:how-gitlab-pages-uses-the-gitlab-api.yml","How Gitlab Pages Uses The Gitlab Api","en-us/blog/how-gitlab-pages-uses-the-gitlab-api.yml","en-us/blog/how-gitlab-pages-uses-the-gitlab-api",{"_path":5036,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5037,"content":5042,"config":5049,"_id":5051,"_type":16,"title":5052,"_source":17,"_file":5053,"_stem":5054,"_extension":20},"/en-us/blog/aws-fargate-codebuild-build-containers-gitlab-runner",{"title":5038,"description":5039,"ogTitle":5038,"ogDescription":5039,"noIndex":6,"ogImage":4940,"ogUrl":5040,"ogSiteName":692,"ogType":693,"canonicalUrls":5040,"schema":5041},"Building containers with GitLab Runner & AWS Fargate executor","Build containers with the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild","https://about.gitlab.com/blog/aws-fargate-codebuild-build-containers-gitlab-runner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build containers with the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Elliot Rushton\"}],\n        \"datePublished\": \"2020-07-31\",\n      }",{"title":5043,"description":5039,"authors":5044,"heroImage":4940,"date":5046,"body":5047,"category":14,"tags":5048},"How to build containers with the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild",[5045],"Elliot Rushton","2020-07-31","\n\nAWS Fargate does not allow containers to run in privileged mode. This means Docker-in-Docker (DinD), which enables the building and running of container images inside of containers, does not work with the [AWS Fargate Custom Executor driver for GitLab Runner](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate). The good news is that users don't have to be blocked by this and may use a cloud-native approach to build containers, effectively leveraging a seamless integration with AWS CodeBuild in the [CI/CD pipeline](/topics/ci-cd/).\n\nWe provide in-depth instructions on how to autoscale GitLab CI on AWS Fargate in [GitLab Runner's documentation](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/index.html). In this blog post, we explain how to instrument CI containers and source repositories to trigger AWS CodeBuild and use it to build container images.\n\n## Architecture overview\n\n![AWS Fargate + CodeBuild: a cloud-native approach to build containers with GitLab Runner](https://about.gitlab.com/images/blogimages/build-container-image-runner-fargate-codebuild.png)\nHow distinct CI workloads run on Fargate.\n{: .note.text-center}\n\nThe picture above illustrates distinct GitLab CI workloads running on Fargate. The container identified by `ci-coordinator (001)` is running a typical CI job which does not build containers, so it does not require additional configuration or dependencies. The second container, `ci-coordinator (002)`, illustrates the problem tackled in this post: The CI container includes the AWS CLI in order to send content to an Amazon S3 bucket, trigger the AWS CodeBuild job, and fetch logs.\n\n## Prerequisites\n\nOnce these prerequisites are in place, you can dive into the step-by-step process to configure CI containers and source repositories to trigger AWS CodeBuild and use it to build container images.\n\n- The [AWS Fargate Custom Executor driver for GitLab Runner](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate) must be set up appropriately.\n- An AWS IAM user with permissions to create and configure S3 and CodeBuild resources.\n- An AWS IAM user or service role with permissions to upload files to S3, start CodeBuild jobs, and read CloudWatch Logs.\n- An AWS IAM user with permissions to create and configure IAM policies and users.\n\n## Step 1: Create an Amazon S3 bucket\n\n1. In the top menu of the [AWS Management Console](https://aws.amazon.com/console/), click Services.\n1. In the Storage section, select `S3`.\n1. Click `Create bucket`.\n1. Choose a descriptive name (`ci-container-build-bucket` will be used as the example) and select your preferred region.\n1. Leave all other fields with default values and click `Create bucket`.\n1. In the Buckets list, click the name of the bucket you created.\n1. Click `Create folder`.\n1. Name it `gitlab-runner-builds`.\n1. Click `Save`.\n\n
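If you prefer the command line, the bucket and folder can also be created with the AWS CLI – a sketch assuming your credentials and default region are already configured (outside us-east-1, `create-bucket` additionally needs a location constraint):\n\n```bash\n# Create the bucket, then the \"folder\" (a zero-byte key ending in /).\naws s3api create-bucket --bucket ci-container-build-bucket\naws s3api put-object --bucket ci-container-build-bucket --key gitlab-runner-builds/\n```\n\n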
## Step 2: Create an AWS CodeBuild Project\n\n1. Using the AWS Console, click `Services` in the top menu.\n1. Select `CodeBuild` in the Developer Tools section.\n1. Click `Create build project`.\n1. In `Project Name` enter `ci-container-build-project`.\n1. In `Source provider` select `Amazon S3`.\n1. In `Bucket` select the `ci-container-build-bucket` created in step one.\n1. In `S3 object key or S3 folder` enter `gitlab-runner-builds/build.zip`.\n1. In `Environment image`, select `Managed image`.\n1. For `Operating system` select your preferred OS from the available options.\n1. For `Runtime(s)`, choose `Standard`.\n1. For `Image`, select `aws/codebuild/standard:4.0`.\n1. For `Image version`, select `Always use the latest image for this runtime version`.\n1. For `Environment type` select `Linux`.\n1. Check the `Privileged` flag.\n1. For the `Service role` select `New service role` and note the suggested `Role name`.\n1. For `Build specifications` select `Use a buildspec file`.\n1. Scroll down to the bottom of the page and click `Create build project`.\n\n## Step 3: Build the CI container image\n\nAs stated in Autoscaling GitLab CI on AWS Fargate, a [custom container is required](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/index.html#step-1-prepare-a-base-container-image-for-the-aws-fargate-task) to run GitLab CI jobs on Fargate. Since the solution relies on communicating with S3 and CodeBuild, you'll need to [have the AWS CLI tool](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) available in the CI container.\n\nAlso install the `zip` tool to make S3 communication smoother. As an example, for an Ubuntu-based container, the lines below must be added to the CI container's `Dockerfile`:\n\n```dockerfile\nRUN apt-get update -qq -y \\\n    && apt-get install -qq -y curl unzip zip \\\n    && curl -Lo awscliv2.zip https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip \\\n    && unzip awscliv2.zip \\\n    && ./aws/install\n```\n\n## Step 4: Add CodeBuild configuration to the repository\n\nBy default, CodeBuild looks for a file named `buildspec.yml` in the build source. This file instructs CodeBuild on how to build and publish the resulting container image. Create this file with the content below and commit it to the git repository (_if you changed the **Buildspec name** when configuring the CodeBuild project [in Step 2](#buildspec), please name the file accordingly_):\n\n```yaml\nversion: 0.2\n\nphases:\n  install:\n    commands:\n      - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://127.0.0.1:2375 --storage-driver=overlay2&\n      - timeout 15 sh -c \"until docker info; do echo .; sleep 1; done\"\n  build:\n    commands:\n      - echo Build started on `date`\n      - docker -v\n      - docker build -t \u003CIMAGE-TAG> .\n      - echo Build completed on `date`\n```\n\n
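Note that this buildspec only builds the image. If you also want to publish it, commands along these lines could be added to the `build` (or a `post_build`) phase – the registry URL and credential variables are placeholders:\n\n```bash\n# Hypothetical additions to the buildspec commands: authenticate,\n# tag, and push the freshly built image to a container registry.\ndocker login -u \"$REGISTRY_USER\" -p \"$REGISTRY_PASSWORD\" registry.example.com\ndocker tag \u003CIMAGE-TAG> registry.example.com/group/project:latest\ndocker push registry.example.com/group/project:latest\n```\n\n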
## Step 5: Set up the GitLab CI job\n\nNow we will set up the GitLab CI job that pulls everything together.\n\n### Interacting with CodeBuild through the AWS CLI\n\nThe CI job will need to interact with AWS to start CodeBuild jobs, poll their status, and fetch logs. Commands such as `aws codebuild` and `aws logs` help to tackle this, so let's use them in a script, `codebuild.sh`:\n\n```bash\n#!/bin/bash\n\nbuild_project=ci-container-build-project\n\n# Start the build and capture its ID and initial status\nbuild_id=$(aws codebuild start-build --project-name \"$build_project\" --query 'build.id' --output text)\nbuild_status=$(aws codebuild batch-get-builds --ids \"$build_id\" --query 'builds[].buildStatus' --output text)\n\n# Poll every 10 seconds until the build leaves the IN_PROGRESS state\nwhile [ \"$build_status\" == \"IN_PROGRESS\" ]\ndo\n    sleep 10\n    build_status=$(aws codebuild batch-get-builds --ids \"$build_id\" --query 'builds[].buildStatus' --output text)\ndone\n\n# Locate the CloudWatch log stream for the finished build and print it\nstream_name=$(aws codebuild batch-get-builds --ids \"$build_id\" --query 'builds[].logs.streamName' --output text)\ngroup_name=$(aws codebuild batch-get-builds --ids \"$build_id\" --query 'builds[].logs.groupName' --output text)\n\naws logs get-log-events --log-stream-name \"$stream_name\" --log-group-name \"$group_name\" --query 'events[].message' --output text\necho \"CodeBuild completed with status $build_status\"\n```\n\n### Add a job to build the resulting container\n\nOnce steps one through five are complete, the source repository will be structured as follows:\n\n```plaintext\n/sample-repository\n  ├── .gitlab-ci.yml\n  ├── buildspec.yml\n  ├── codebuild.sh\n  ├── Dockerfile\n  ├── \u003CAPPLICATION-FILES>\n```\n\nThe final step to build the container is to add a job to `.gitlab-ci.yml`:\n\n```yaml\ndockerbuild:\n  stage: deploy\n  script:\n    - zip build.zip buildspec.yml Dockerfile \u003CAPPLICATION-FILES>\n    - aws configure set default.region \u003CREGION>\n    - aws s3 cp build.zip s3://ci-container-build-bucket/gitlab-runner-builds/build.zip\n    - bash codebuild.sh\n```\n\nBelow are definitions of terms used in the script:\n\n- `\u003CAPPLICATION-FILES>` is a placeholder for the files required to successfully build the resulting container image using the `Dockerfile`, e.g., `package.json` and `app.js` in a Node.js application\n- `Dockerfile` is used to build the resulting image. _Note: It is not the same file used to build the CI container image, mentioned in [Step 3: Build the CI container image](#step-3-build-the-ci-container-image)_\n- `zip` and the AWS CLI must be installed in the CI container to make the script work – refer to [Step 3: Build the CI container image](#step-3-build-the-ci-container-image) for details\n\n## Step 6: Set up AWS credentials\n\nThe final step is to set up the AWS credentials. As already mentioned, the CI job will interact with AWS through the AWS CLI to perform a number of operations, and to do that, the AWS CLI needs to authenticate as an IAM user with the permissions listed below. We recommend you create a new user and grant it minimal privileges instead of using your personal AWS user account; for the sake of simplicity, that is the approach we suggest for completing this walk-through guide.\n\nThis AWS user only needs programmatic access. Don't forget to make note of its Access key ID and Secret access key – they will be needed later. A simple way to grant only the minimal privileges to the new user is to create a customer managed policy, since it can be directly attached to the user. 
A group might also be used to grant the same privileges to more users, but it is not mandatory for running the sample workflow.\n\n- S3\n\n  ```json\n  {\n    \"Effect\": \"Allow\",\n    \"Action\": \"s3:PutObject\",\n    \"Resource\": \"arn:aws:s3:::ci-container-build-bucket/gitlab-runner-builds/*\"\n  }\n  ```\n\n- CodeBuild\n\n  ```json\n  {\n    \"Effect\": \"Allow\",\n    \"Action\": [\"codebuild:StartBuild\", \"codebuild:BatchGetBuilds\"],\n    \"Resource\": \"arn:aws:codebuild:\u003CREGION>:\u003CACCOUNT-ID>:project/ci-container-build-project\"\n  }\n  ```\n\n- CloudWatch Logs\n\n  ```json\n  {\n    \"Effect\": \"Allow\",\n    \"Action\": \"logs:GetLogEvents\",\n    \"Resource\": \"arn:aws:logs:\u003CREGION>:\u003CACCOUNT-ID>:log-group:/aws/codebuild/ci-container-build-project:log-stream:*\"\n  }\n  ```\n\nThe access credentials can be provided to the AWS CLI through GitLab CI environment variables. Go to your GitLab project's **CI/CD Settings**, click **Expand** in the **Variables** section, and add `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` with the values you got from the AWS Management Console after creating the IAM user. See the image below for the result you can expect:\n\n![Providing AWS credentials for GitLab Runner](https://about.gitlab.com/images/blogimages/build-container-image-runner-fargate-codebuild-credentials.png)\n\nUsing an IAM Role and [Amazon ECS temporary/unique security credentials](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) is also possible, but not covered in this tutorial.\n{: .note.text-center}\n\n
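For reference, the user, policy, and access key from this step could also be created with the AWS CLI – a sketch with placeholder names, assuming the statements above are combined into a standard policy document saved as `policy.json`:\n\n```bash\n# Create the CI user, attach the minimal policy, and issue access keys.\naws iam create-user --user-name gitlab-ci-codebuild\naws iam create-policy --policy-name gitlab-ci-codebuild-policy --policy-document file://policy.json\naws iam attach-user-policy --user-name gitlab-ci-codebuild \\\n    --policy-arn \"arn:aws:iam::\u003CACCOUNT-ID>:policy/gitlab-ci-codebuild-policy\"\naws iam create-access-key --user-name gitlab-ci-codebuild\n```\n\n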
## Step 7: It's showtime\n\nWith all configurations in place, commit the changes and trigger a new pipeline to watch the magic happen!\n\n### Just need the highlights?\n\n1. The CI job script added in [Step 5](#add-a-job-to-build-the-resulting-container) compresses the resulting container image build files into `build.zip`.\n1. `build.zip` is then uploaded to the S3 bucket we created in [Step 1: Create an Amazon S3 bucket](#step-1-create-an-amazon-s3-bucket).\n1. Next, `codebuild.sh` starts a CodeBuild job based on the project created in [Step 2: Create an AWS CodeBuild Project](#step-2-create-an-aws-codebuild-project) (note: that project has an S3 object as its source provider).\n1. Finally, the CodeBuild job downloads `gitlab-runner-builds/build.zip` from S3, decompresses it and – following `buildspec.yml` – builds the resulting container image.\n\nA sample repository demonstrating everything described in this article is available [here](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/codebuild-on-fargate-example/).\n\n## Cleanup\n\nIf you want to clean up after testing the custom executor with AWS Fargate and CodeBuild, you should remove the following objects:\n\n- The AWS S3 bucket created in [Step 1](#step-1-create-an-amazon-s3-bucket)\n- The AWS CodeBuild project created in [Step 2](#step-2-create-an-aws-codebuild-project)\n- The `RUN` command added to the CI container image in [Step 3](#step-3-build-the-ci-container-image)\n- The `buildspec.yml` file created in [Step 4](#step-4-add-codebuild-configuration-to-the-repository)\n- The `codebuild.sh` file created in [Step 5](#step-5-set-up-the-gitlab-ci-job)\n- The `dockerbuild` job added to `.gitlab-ci.yml` in [Step 5](#step-5-set-up-the-gitlab-ci-job)\n- The IAM policy, user (and possibly group) created in [Step 6](#step-6-set-up-aws-credentials)\n- The GitLab CI/CD variables added in [Step 6](#step-6-set-up-aws-credentials)\n\n
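The AWS-side resources in that list can be torn down from the CLI as well – a sketch using the names from this walkthrough (`--force` also deletes the bucket's contents):\n\n```bash\n# Delete the CodeBuild project, then empty and remove the S3 bucket.\naws codebuild delete-project --name ci-container-build-project\naws s3 rb s3://ci-container-build-bucket --force\n```\n\n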
Read more about GitLab and AWS:\n\n- [How autoscaling GitLab CI works on AWS Fargate](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/)\n- [GitLab 12.10 released with Requirements Management and Autoscaling CI on AWS Fargate](/releases/2020/04/22/gitlab-12-10-released/)\n- [Announcing 32/64-bit Arm Runner Support for AWS Graviton2](/blog/gitlab-arm-aws-graviton2-solution/)\n\nCover image by [Lucas van Oort](https://unsplash.com/@switch_dtp_fotografie) on [Unsplash](https://unsplash.com)\n{: .note}\n",[110,232,726],{"slug":5050,"featured":6,"template":678},"aws-fargate-codebuild-build-containers-gitlab-runner","content:en-us:blog:aws-fargate-codebuild-build-containers-gitlab-runner.yml","Aws Fargate Codebuild Build Containers Gitlab Runner","en-us/blog/aws-fargate-codebuild-build-containers-gitlab-runner.yml","en-us/blog/aws-fargate-codebuild-build-containers-gitlab-runner",{"_path":5056,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5057,"content":5063,"config":5067,"_id":5069,"_type":16,"title":5070,"_source":17,"_file":5071,"_stem":5072,"_extension":20},"/en-us/blog/use-gitlab-with-vscode",{"title":5058,"description":5059,"ogTitle":5058,"ogDescription":5059,"noIndex":6,"ogImage":5060,"ogUrl":5061,"ogSiteName":692,"ogType":693,"canonicalUrls":5061,"schema":5062},"How we created a GitLab Workflow Extension for VS Code","Now you can leverage GitLab from within Visual Studio Code with our official GitLab Workflow Extension.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681469/Blog/Hero%20Images/gitlab-vscode-blog-image2.jpg","https://about.gitlab.com/blog/use-gitlab-with-vscode","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we created a GitLab Workflow Extension for VS Code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Roman Kuba\"}],\n        \"datePublished\": \"2020-07-31\",\n      }",{"title":5058,"description":5059,"authors":5064,"heroImage":5060,"date":5046,"body":5065,"category":14,"tags":5066},[4356],"\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-08-04.\n{: .alert .alert-info .note}\n\nThe people who work at GitLab are encouraged to build the things they want and need, which helps us expand the ways we work with our growing product. We're thrilled to announce that we've created an official GitLab Workflow Extension for VS Code.\n\n## How did we get here?\n\n[More than two years ago](/blog/gitlab-vscode-extension/), [Fatih Acet](https://gitlab.com/fatihacet), a former senior frontend engineer on [Plan](/handbook/engineering/development/dev/plan-project-management/), started working on a [VS Code extension](/blog/gitlab-vscode-extension/) to allow users to interact with GitLab from within their code editor. At GitLab, [everything starts with a Merge Request](/handbook/communication/#start-with-a-merge-request), which is exactly how Fatih started building the extension. Fatih, along with more than 25 contributors, continued to expand the extension by adding new features. The extension has been installed more than 160,000 times.\n\nIt’s been remarkable to see the way the community collaborated to build the extension, making it a tool that is valuable to their work. The GitLab Workflow Extension is the perfect case study of how [developers can create meaningful work at this company](/direction/create/editor_extension/#where-we-are-headed).\n\nWhen Fatih decided to move on from GitLab in March 2020, we had an opportunity to take over the GitLab Workflow Extension, turning it into a tool GitLab would officially maintain and support. We jumped at the opportunity to maintain an auxiliary project outside of the main GitLab project. As we continue to move fast and create the best experiences possible for our users, we expect this extension to become a [key component of our strategy](/direction/create/editor_extension/#overview).\n\n## How to use the extension\n\nIf you want to start using the extension, you can install it from within VS Code directly by searching for [GitLab Workflow](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow), which is now published through an official GitLab account.\n\nIf you were already using the extension, it automatically updated to the GitLab publisher, and you might have already seen a few updates coming in.\n\n## What improvements have we made?\n\nWhen we took over the extension, we worked with other teams across GitLab to immediately perform an [application security review](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/170). Along the way, we made sure to create a [security release process](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/master/docs/security-releases.md). We did this to ensure that users were safe to continue using the extension and so that we could fix any problems that surface. We also worked through some automation to help with publishing the extension and [began to lay a foundation for future testing](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/199).\n\nWe also shipped [version 3.0.0](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/master/CHANGELOG.md#v300-2020-06-25), which was spearheaded by our community and helped to resolve some long-standing bugs and issues. The extension has come a long way in just a few short weeks. 
We’re excited by the progress we’re making and the engagement we’re continuing to see, but there is still a lot that needs to be done.\n\n## What’s next?\n\nNothing in software development is perfect, so we are aware of the extension's shortcomings, some inconsistencies, and some long-open feature requests. You can see our many to-dos on our GitLab Workflow Extension [issues list](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues). For now, we’re focused on triaging the existing issues and capturing any new bugs. You should see much more involvement from our [Create:Editor](/handbook/engineering/development/dev/create/ide/) team as we continue these efforts, and we’re looking forward to engaging with the community on these items.\n\nWe’re also evaluating the best path forward for maintaining the extension by focusing on the test suite and code quality, so we won’t break things by accident. You can join us in our discussion on [this issue](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/204). While this might slow down some new feature releases in the short term, we’re confident these are the right long-term decisions to ensure you have an extension you can trust, so you can make the GitLab Workflow Extension an integral part of your workflow.\n\n## Everyone can contribute\n\nThe extension is open source, and we're improving the \"[How to Contribute](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/master/CONTRIBUTING.md)\" guides alongside some other documentation. We want to have a space where everyone can contribute and make this extension better for all of us.\n\n## Check out more engineering content on GitLab\n\n- [How to build containers with the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild](/blog/aws-fargate-codebuild-build-containers-gitlab-runner/)\n- [How application security engineers can use GitLab to secure their projects](/blog/secure-stage-for-appsec/)\n- [Best practices to keep your Kubernetes runners moving](/blog/best-practices-for-kubernetes-runners/)\n\n\n## Read more on Visual Studio and GitLab:\n\n- [Four new tools for your Visual Studio Code and GitLab tool belt](/blog/vscode-workflow-new-features/)\n\n- [Visual Studio code editor: Eight tips for using GitLab VS Code](/blog/vscode-workflows-for-working-with-gitlab/)\n\n- [VS Code extension development with GitLab](/blog/vscode-extension-development-with-gitlab/)\n\n- [How to do GitLab merge request reviews in VS Code](/blog/mr-reviews-with-vs-code/)\n\n",[1328,915],{"slug":5068,"featured":6,"template":678},"use-gitlab-with-vscode","content:en-us:blog:use-gitlab-with-vscode.yml","Use Gitlab With Vscode","en-us/blog/use-gitlab-with-vscode.yml","en-us/blog/use-gitlab-with-vscode",{"_path":5074,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5075,"content":5081,"config":5086,"_id":5088,"_type":16,"title":5089,"_source":17,"_file":5090,"_stem":5091,"_extension":20},"/en-us/blog/effective-ci-cd-pipelines",{"title":5076,"description":5077,"ogTitle":5076,"ogDescription":5077,"noIndex":6,"ogImage":5078,"ogUrl":5079,"ogSiteName":692,"ogType":693,"canonicalUrls":5079,"schema":5080},"Want a more effective CI/CD pipeline? 
Try our pro tips","Here’s how to take your CI/CD pipeline to the next level with hands-on advice about faster builds, better security and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681447/Blog/Hero%20Images/cicdpipelines.jpg","https://about.gitlab.com/blog/effective-ci-cd-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Want a more effective CI/CD pipeline? Try our pro tips\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-29\",\n      }",{"title":5076,"description":5077,"authors":5082,"heroImage":5078,"date":5083,"body":5084,"category":14,"tags":5085},[2150],"2020-07-29","\n\nNow that your [CI/CD pipeline](/topics/ci-cd/) is up and running, it’s time to fine-tune performance. This hands-on guide will walk you through tweaks that will improve a CI/CD pipeline’s speed, functionality, security, and integration with other tools and platforms.\n\n## Built for speed\n\nCI/CD and DevOps promise faster releases, and we know it’s true: Even a basic automated pipeline is much speedier than the old days of manual handoffs. But there are ways to make the CI/CD pipeline even zippier. One straightforward option that guarantees faster builds is to [autoscale runners](/blog/making-builds-faster-autoscaling-runners/). If you have 15 minutes to spare, you can link your GitLab CI pipeline to [Google Kubernetes Engine](/blog/gitlab-ci-on-google-kubernetes-engine/). And it doesn’t get much faster than using the [Auto DevOps option](/blog/guide-to-ci-cd-pipelines/) if you’re setting up a new pipeline from scratch.\n\n## Do more with less\n\nOnce a pipeline is humming along, it’s time to think about tinkering with what you have. This is one of our favorite things to do at GitLab – we even used our CI/CD pipeline to [turn our group conversation into a podcast](/blog/group-conversation-podcast/). We had an [unconventional CI/CD journey](/blog/gitlab-journey-to-cicd/), which goes a long way to explaining our overall enthusiasm for this technology.\n\nOur best advice when it comes to an effective CI/CD pipeline is to think outside the box. Need build images? It’s [easy to do](/blog/building-build-images/) with your CI/CD pipeline. You can also [create a cross-project pipeline](/blog/cross-project-pipeline/), or [build a bridge between Rust and Firebase](/blog/python-rust-and-gitlab-ci/).\n\n## Make it secure\n\nIt’s fun to play around with CI/CD functionality, but it’s critical to make sure your pipeline is secure. Start by making sure you [know the threat landscape](/blog/defend-cicd-security/). 
If you store key data in the secrets management service [Vault](https://www.vaultproject.io), here’s how GitLab [makes the integration process easier and safer](/blog/vault-integration-process/).\n\nAnd for Jenkins users, it’s simple to [create deterministic security jobs](https://docs.gitlab.com/ee/integration/jenkins.html) from within GitLab.\n\n## Work with what you have\n\nNo effective CI/CD pipeline exists in a vacuum, and to get the most out of yours it’s important to integrate seamlessly with other platforms and tools.\n\nAWS users can [set up multi-account SAM deployments](/blog/multi-account-aws-sam-deployments-with-gitlab-ci/) or [autoscale GitLab CI](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/) on Fargate.\n\nTeams working on Android projects can [create a customized GitLab CI](/blog/setting-up-gitlab-ci-for-android-projects/) easily.\n\nAnd finally, it’s possible to take advantage of Google’s Firebase, a backend-as-a-service tool, to enable [continuous deployment of databases, serverless functions, and apps](/blog/gitlab-ci-cd-with-firebase/).\n\n**Read more about CI/CD:**\n\n* [The four big benefits](/blog/positive-outcomes-ci-cd/) of CI/CD\n\n* [CI/CD challenges](/blog/modernize-your-ci-cd/) to consider\n\n* Everything you need to know about [Auto DevOps](/blog/auto-devops-explained/)\n\nCover image by [Jacek Dylag](https://unsplash.com/@dylu) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[110,894,232],{"slug":5087,"featured":6,"template":678},"effective-ci-cd-pipelines","content:en-us:blog:effective-ci-cd-pipelines.yml","Effective Ci Cd Pipelines","en-us/blog/effective-ci-cd-pipelines.yml","en-us/blog/effective-ci-cd-pipelines",{"_path":5093,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5094,"content":5100,"config":5105,"_id":5107,"_type":16,"title":5108,"_source":17,"_file":5109,"_stem":5110,"_extension":20},"/en-us/blog/safe-deploys",{"title":5095,"description":5096,"ogTitle":5095,"ogDescription":5096,"noIndex":6,"ogImage":5097,"ogUrl":5098,"ogSiteName":692,"ogType":693,"canonicalUrls":5098,"schema":5099},"GitLab's guide to safe deployment practices","It's important to safeguard your deployment process. Here's our best advice to protect your environments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678412/Blog/Hero%20Images/safe_deploy.jpg","https://about.gitlab.com/blog/safe-deploys","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's guide to safe deployment practices\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-07-23\",\n      }",{"title":5095,"description":5096,"authors":5101,"heroImage":5097,"date":5102,"body":5103,"category":14,"tags":5104},[4631],"2020-07-23","\nHere at GitLab we understand the importance of safe deployment practices. \n\n[Progressive delivery](/direction/ops/#progressive-delivery) is continuous delivery with fine-grained control over who sees the change. This ensures that all code and configuration updates go through the [CI/CD stages](/topics/ci-cd/) to catch any regressions or bugs before they reach customers. 
If something does make it past those gates, progressive delivery makes sure any negative impact is as small as possible.\n\nWe have recently added several features that add safeguards to your deployment process, which we will review in this blog post.\n\n### Protected Environments\n\nIt is important that deploy jobs are restricted to only those who are authorized to deploy to that environment, and we call this restriction by role \"protected\". While this feature has been around for a while, it is important to remember that it should be the first step to take when thinking about safe deployments. \n\nTake a deeper dive into [protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n### Sequential Deployment (or Safe Continuous Deployment)\n\nIf your project follows the Continuous Deployment practice of deploying the `master` branch to the production environment with GitLab CI/CD pipelines, you may encounter the following problems due to the asynchronous nature of pipeline jobs:\n\n- Multiple deployment jobs run concurrently, targeting the same environment. This can make the environment unstable because the deployment scripts could conflict and finish in an incomplete state.\n- An older deployment job could overwrite the latest deployment, resulting in an unintentional rollback. Some users could be exposed to old feature sets on the production website even though the pipeline shows that the latest deployment job finished successfully.\n- A pipeline might deploy to production at the worst time, such as on a holiday or over the weekend, when there is limited staff available to solve potential problems.\n\nTo address these problems, GitLab provides the following options:\n\n* [Limit job concurrency](https://docs.gitlab.com/ee/ci/yaml/#resource_group)\n* [Prevent deployment of old versions](https://docs.gitlab.com/ee/ci/pipelines/settings.html#skip-outdated-deployment-jobs)\n* [Deploy freeze](https://docs.gitlab.com/ee/user/project/releases/index.html#prevent-unintentional-releases-by-setting-a-deploy-freeze)\n\n### Limit job concurrency\n\nYou can limit deployment concurrency by adding a `resource_group` to any `.gitlab-ci.yml` jobs that should run one at a time (see the sketch after the video below). For example:\n\n* Pipeline-A starts running with SHA-A\n* Pipeline-B starts running with SHA-B (newer)\n* Pipeline-A starts a deployment\n* Pipeline-B waits for Pipeline-A's deployment to finish\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/m6eZb6U-M2A\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n
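Opting in is a one-line change per job. Here is a minimal sketch – the job name and deploy script are placeholders:\n\n```bash\n# Append a deploy job that serializes on the \"production\" resource group.\ncat >> .gitlab-ci.yml \u003C\u003C'EOF'\ndeploy_production:\n  stage: deploy\n  script:\n    - ./deploy.sh\n  resource_group: production\nEOF\n```\n\n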
### Prevent deployment of old versions\n\nThe execution order of pipeline jobs can vary from run to run, which could cause undesired behavior. For example, a deployment job in a newer pipeline could finish before a deployment job in an older pipeline. This creates a race condition where the older deployment finishes later, overwriting the \"newer\" deployment.\n\nYou can ensure that older deployment jobs are cancelled automatically when a newer deployment runs by enabling the [prevent deployment of old versions](https://docs.gitlab.com/ee/ci/pipelines/settings.html#skip-outdated-deployment-jobs) feature:\n\n* Pipeline-A starts running with SHA-A\n* Pipeline-B starts running with SHA-B (newer)\n* Pipeline-B finishes. Now SHA-B is on the production environment\n* Pipeline-A is canceled automatically because it was going to deploy SHA-A to production\n\n![Prevent deployment of old versions](https://about.gitlab.com/images/blogimages/older_job.png){: .shadow}\n\n### Deployment Freeze\n\nTo prevent deployments during a particular period, such as a planned holiday when most employees are out, you can set up a deploy freeze. During a deploy freeze, no deployments can be executed. This is helpful to ensure that deployments do not happen unexpectedly.\n\n
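Freeze windows are defined with cron syntax. As a sketch, one can be created through the freeze periods REST API – the project ID, token, and times below are placeholders:\n\n```bash\n# Freeze deployments from Friday 23:00 until Monday 07:00 UTC.\ncurl --request POST --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" \\\n     --data 'freeze_start=0 23 * * 5' \\\n     --data 'freeze_end=0 7 * * 1' \\\n     --data 'cron_timezone=UTC' \\\n     \"https://gitlab.example.com/api/v4/projects/123/freeze_periods\"\n```\n\n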
Find more detailed information about [deployment safety](https://docs.gitlab.com/ee/ci/environments/deployment_safety.html).\n\n**Read more about GitLab and safety:**\n\n* [Capitalize on GitLab security tools](https://docs.gitlab.com/ee/integration/jenkins.html)\n\n* How app sec engineers [can use GitLab to improve security](/blog/secure-stage-for-appsec/)\n\n* Wondering [how secure GitLab is?](/blog/soc2-compliance/)\n\nCover image by [Mathew Schwartz](https://unsplash.com/photos/qcpwU_oMyu8) on [Unsplash](https://unsplash.com)\n{: .note}\n",[110,2331,1307],{"slug":5106,"featured":6,"template":678},"safe-deploys","content:en-us:blog:safe-deploys.yml","Safe Deploys","en-us/blog/safe-deploys.yml","en-us/blog/safe-deploys",{"_path":5112,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5113,"content":5119,"config":5124,"_id":5126,"_type":16,"title":5127,"_source":17,"_file":5128,"_stem":5129,"_extension":20},"/en-us/blog/integrating-azure-devops-scm-and-gitlab",{"title":5114,"description":5115,"ogTitle":5114,"ogDescription":5115,"noIndex":6,"ogImage":5116,"ogUrl":5117,"ogSiteName":692,"ogType":693,"canonicalUrls":5117,"schema":5118},"How to integrate Azure DevOps repositories with GitLab","How to keep your code in an Azure DevOps repository and run CI/CD with GitLab pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664363/Blog/Hero%20Images/aleksey-kuprikov.jpg","https://about.gitlab.com/blog/integrating-azure-devops-scm-and-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to integrate Azure DevOps repositories with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2020-07-09\",\n      }",{"title":5114,"description":5115,"authors":5120,"heroImage":5116,"date":5121,"body":5122,"category":14,"tags":5123},[1019],"2020-07-09","\n\nRecently we’ve been asked by several people if it is possible to integrate Azure DevOps/VSTS (Visual Studio Team Services) source code management with GitLab. They are looking for a modern [CI/CD solution](/topics/ci-cd/) like GitLab, but as part of a gradual transition they still need to keep managing their code in Azure DevOps/VSTS. \n\n## Does Azure DevOps integrate with GitLab?\n\nYes, Azure DevOps Services does integrate with GitLab.\n\nAlthough we of course recommend using GitLab CI/CD together with our built-in GitLab SCM, this integration of Azure DevOps source code management and GitLab makes it possible to migrate slowly from Azure DevOps by leaving your code in the Azure [DevOps](/topics/devops/) repository while you adopt GitLab CI/CD. This integration is possible with both the self-managed and SaaS versions of GitLab. The integration works only with Azure DevOps/VSTS Git version control; TFVC (Team Foundation Version Control) isn’t supported. \n\n### In GitLab, there are two features that enable this integration:\n\n- [GitLab CI/CD for external repositories](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/)\n- [Remote repository mirroring](https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html)\n\n## What is a repository in DevOps?\n\nCode repositories in tools like GitLab and Azure exist to house all source code. Sometimes these repositories are referred to as a DevOps “repo” or a source repository. Whatever the title, code repositories provide a place where developers can work to ensure high code quality. \nGitLab uses a [git-based repository](/solutions/source-code-management/) for source code management with version control. It lets GitLab users perform code reviews and easily resolve developer issues.\n\n## What is the difference between GitLab and Azure DevOps?\n\nAzure DevOps has a range of services for managing the development lifecycle. Some of its main features include agile planning boards, private Git repos for source code management, and Azure Pipelines.\n\nGitLab is a single platform for the entire DevSecOps lifecycle and includes the following:\n\n- Planning and collaboration\n- Source code management\n- Code reviews\n- CI/CD pipelines\n- Constant security scanning and monitoring\n- Advanced deployments\n- Vulnerability management\n\nGitLab can help manage the entire DevSecOps lifecycle to deliver software quickly and efficiently while bolstering security and compliance.\n\n## How do I connect to Azure from GitLab?\n\nIt may take some time to fully move over from Azure to GitLab for source code management. To smooth the transition, follow these simple steps to connect GitLab to Azure:\n\n1. Create a new project in GitLab by clicking the New Project button.  ![Create new project ](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado1.png){: .large.center}\n\n2. Choose the ‘CI/CD for external repo’ tab, and click on Repo by URL.  ![CI/CD for external repo](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado2.png){: .large.center}\n\n3. Open your repository in Azure DevOps and click Clone.  ![Getting clone url ](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado3.png){: .large.center}\n\n4. Copy the URL. If your repository is private, you will need to generate Git credentials – just click this button and copy the username and password.  ![Credentials](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado4.png){: .large.center}\n\n5. Paste the URL in GitLab under the Git repository URL, give it a name, set the visibility level, and click Create project. Add the username and password in case your Azure DevOps repository is private. Note: The repository must be accessible over http://, https:// or git://. When using the http:// or https:// protocols, please provide the exact URL to the repository. HTTP redirects will not be followed.  ![Create project form](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado5.png){: .large.center}\n\n6. Your project is now successfully mirrored to GitLab. Branches, tags, and commits will be synced automatically to GitLab. \n\n7. To configure a CI/CD pipeline there are two options:\n\nBefore pushing your first commit, open the CI/CD settings in GitLab and enable Auto DevOps. It will set the CI/CD configuration, so each commit in Azure Repos will trigger a CI/CD pipeline in GitLab which will build, test, and deploy your app.  
![Auto DevOps settings](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado6.png){: .shadow.large.center}\n\nAlternatively, if you want to define the pipeline configuration yourself instead of using Auto DevOps, add a [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/) file to your repository's root directory. The YAML code should include your [CI/CD definitions](/blog/guide-to-ci-cd-pipelines/). Once this file is included in the root directory, a CI/CD pipeline will be triggered for each commit. If you are not familiar with .gitlab-ci.yml, start by creating a file named .gitlab-ci.yml and paste the code below into it. This code includes build and test stages, with a job in each stage that prints text to the console. Later on you can add additional scripts to each job, as well as additional jobs and stages. To create more complex pipelines, you can [use the pipeline templates](https://docs.gitlab.com/ee/ci/yaml/#includetemplate) that are [shipped with GitLab](https://gitlab.com/gitlab-org/gitlab/tree/master/lib/gitlab/ci/templates) instead of starting from scratch.\n\n```yaml\nstages:\n  - build\n  - test\n\nbuild:\n  stage: build\n  script:\n    - echo \"Build job\"\n\ntest:\n  stage: test\n  script:\n    - echo \"Test job\"\n```\n\nThat’s it, you are all set! \n\n## Suggested development flow \n\n![Development flow diagram](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado_7_2.png){: .shadow.large.center}\n\n1. CODE (Developer IDE of choice) The developer uses their favorite IDE to develop code, clones the repo to the workstation, and creates a branch.  ![Visual Studio Code](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado8.png){: .shadow.large.center}\n\n2. COMMIT (Git) After the feature is developed or the bug is fixed, the developer pushes the work to the Azure repository server.  ![Azure DevOps Repos](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado9.png){: .shadow.large.center}\n\n3. BUILD (GitLab) The branch with the commit history will be mirrored to GitLab and the CI/CD pipeline will be triggered. The pipeline will build the code.  ![GitLab pipeline graph](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado10.png){: .shadow.large.center}\n\n    Artifacts will be created and made available for download.  ![Artifacts](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado11.png){: .shadow.large.center}\n\n    If Auto DevOps is enabled, a container image will be created and pushed to the built-in Container Registry.  ![GitLab Container Registry](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado12.png){: .shadow.large.center}\n\n    If a package registry is enabled in the project, packages will be published to the designated package manager.  ![GitLab Package Registry](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado13.png){: .shadow.large.center}\n\n4. TEST (GitLab) Security scans, license scans, and other tests are executed as part of the CI pipeline.  ![GitLab scans](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado14.png){: .shadow.large.center}\n\n5. REVIEW & PULL REQUEST (GitLab & Azure DevOps Repos) Review the pipeline results in GitLab; if the pipeline passed without errors and the new change hasn’t introduced new vulnerabilities, the developer creates a pull request in Azure DevOps. A code review starts, and the developer might need to make a few changes before merging to master. Each commit will trigger a CI/CD pipeline in GitLab.  
![Azure DevOps pull request](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado15.png){: .shadow.large.center}\n\n6. MERGE (Azure DevOps Repos and GitLab) The Azure DevOps pull request is approved, and the branch is merged into the master branch in the Azure DevOps repository.\n\nDepending on your pipeline configuration, this merge to the master branch will trigger the CI/CD pipeline in GitLab to validate the merge results, build new packages and container images, and then deploy them.  ![GitLab CI/CD pipeline graph](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado16.png){: .shadow.large.center}\n\n## Development workflow demonstration \n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/HfpP2pEmkoM\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## A solution worth trying \n\nGitLab offers leading source code management and CI/CD in one application, a combination that many [GitLab customers](/customers/) use because of its power. However, we know that sometimes there are constraints that do not allow teams to migrate their repository to GitLab SCM, at least not right away. For these situations, even if only temporarily, we offer the GitLab CI/CD for external repositories capability illustrated here. \n\n\n**Read more about GitLab CI/CD:**\n\n[Forrester report compares leading CI/CD tools](https://about.gitlab.com/analysts/forrester-cloudci19/)\n\n[Autoscale GitLab CI with AWS Fargate](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/)\n\n[Case Study - how Goldman Sachs improved from 1 build every two weeks to over a thousand per day](https://about.gitlab.com/customers/goldman-sachs/)\n\nCover image by [Aleksey Kuprikov](https://unsplash.com/@alekskuprfilmz) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[110,1646,726],{"slug":5125,"featured":6,"template":678},"integrating-azure-devops-scm-and-gitlab","content:en-us:blog:integrating-azure-devops-scm-and-gitlab.yml","Integrating Azure Devops Scm And Gitlab","en-us/blog/integrating-azure-devops-scm-and-gitlab.yml","en-us/blog/integrating-azure-devops-scm-and-gitlab",{"_path":5131,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5132,"content":5138,"config":5144,"_id":5146,"_type":16,"title":5147,"_source":17,"_file":5148,"_stem":5149,"_extension":20},"/en-us/blog/migrating-to-puma-on-gitlab",{"title":5133,"description":5134,"ogTitle":5133,"ogDescription":5134,"noIndex":6,"ogImage":5135,"ogUrl":5136,"ogSiteName":692,"ogType":693,"canonicalUrls":5136,"schema":5137},"How we migrated application servers from Unicorn to Puma","It's been a long journey but with the release of GitLab 13.0 Puma is our default application server. 
Here's what we did and learned along the way.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681413/Blog/Hero%20Images/appserverpuma.jpg","https://about.gitlab.com/blog/migrating-to-puma-on-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we migrated application servers from Unicorn to Puma\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Craig Gomes\"}],\n        \"datePublished\": \"2020-07-08\",\n      }",{"title":5133,"description":5134,"authors":5139,"heroImage":5135,"date":5141,"body":5142,"category":14,"tags":5143},[5140],"Craig Gomes","2020-07-08","\n\nIt’s been years in the making, but our journey to migrate our application servers from Unicorn to Puma is complete. With the GitLab 12.9 release, Puma was running on GitLab.com, and now with 13.0 it is the default application server for everyone. This is the story of how we migrated from Unicorn to Puma and the results we’ve seen.\n\n## A starting point\n\nBoth [Unicorn](https://yhbt.net/unicorn/) and [Puma](https://puma.io) are web servers for Ruby on Rails. The big difference is that Unicorn uses a single-threaded process model and Puma uses a multithreaded model. \n\nUnicorn has a multi-process, single-threaded architecture to make better use of available CPU cores (processes can run on different cores) and to have stronger fault tolerance (most failures stay isolated in only one process and cannot take down GitLab entirely). On startup, the Unicorn ‘main’ process loads a clean Ruby environment with the GitLab application code, and then spawns ‘workers’ which inherit this clean initial environment. The ‘main’ process never handles any requests; that is left to the workers. The operating system network stack queues incoming requests and distributes them among the workers.\n\nUnlike Unicorn, Puma can run multiple threads in each worker. Puma can be tuned to run multiple threads and workers to make optimal use of your server and workload. For example, in Puma defining \"N workers\" with 1 thread is essentially equivalent to \"N Unicorn workers.\" In multi-threaded processes, thread safety is critical to ensure proper functionality. We encountered one thread-safety issue while migrating to Puma, and we'll get to that shortly.\n\n### Technical Descriptions\n\nUnicorn is an HTTP server for Rack applications designed to only serve fast clients on low-latency, high-bandwidth connections and take advantage of features in Unix/Unix-like kernels. Slow clients should only be served by placing a reverse proxy capable of fully buffering both the request and response in between Unicorn and slow clients.\n\nPuma is a multi-threaded web server and our replacement for Unicorn. Unlike other Ruby web servers, Puma was built for speed and parallelism. Puma is a small library that provides a very fast and concurrent HTTP 1.1 server for Ruby web applications. It is designed for running Rack apps only.\n\nWhat makes Puma so fast is the careful use of a Ragel extension to provide fast, accurate HTTP 1.1 protocol parsing. This makes the server scream without too many portability issues.\n\n## Why Puma?\n\nWe began early investigations into Puma believing it would help resolve some of our [memory growth issues](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/3700) and also help with scalability. By switching from Unicorn's single-threaded process model we could cut down on the number of processes running and the memory overhead of each of these processes. Ruby processes take up a significant amount of memory. Threads, on the other hand, consume much less memory than workers because they are able to share a significantly larger portion of application memory. When I/O causes a thread to pause, another thread can continue with its application request. In this way, multi-threading makes the best use of the available memory and CPU, reducing memory consumption by [approximately 40%](/releases/2020/05/22/gitlab-13-0-released/#reduced-memory-consumption-of-gitlab-with-puma).
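\n\nTo make the worker/thread model concrete, here is a minimal `config/puma.rb` sketch (the values are illustrative only, not GitLab's production settings):\n\n```ruby\n# config/puma.rb -- illustrative values, not GitLab's production settings.\n# Total capacity is workers * threads: 4 * 4 = 16 concurrent requests here.\nworkers 4        # forked worker processes, analogous to Unicorn workers\nthreads 1, 4     # min and max threads per worker; a max of 1 would mimic Unicorn\npreload_app!     # load the app once in the main process before forking\n```\n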
\n\n## The early appearance of Puma\n\nThe first appearance of Puma in a GitLab issue was in a discussion about using [multithreaded application servers](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/3592), dating back to November 20, 2015. In our spirit of iteration, the first attempt at adding experimental support for Puma followed shortly after with a [merge request](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/1899) on November 25, 2015. The initial [results](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/3592#note_2805965) indicated a lack of stability and thus did not merit moving forward with Puma at the time. While the push [to improve our memory footprint](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/25421) continued, the efforts to move forward with Puma stalled for a while.\n\n## Experimental development use\n\nIn May 2018, Puma was configured for [experimental development use](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/532) in GitLab Rails and [Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/2801). Later that year, we added [Puma metrics to Prometheus](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/52769) to track our internal experimental usage of Puma. By early spring of 2019, GitLab moved forward with the creation of the [Memory Team](/blog/why-we-created-the-gitlab-memory-team/), whose early set of identified tasks included deploying Puma to GitLab.com.\n\n\n## Implementation steps\n\nThe efforts to implement Puma on GitLab.com and for our self-managed customers started in earnest in early 2019 with the [Enable Puma Web Server for GitLab](https://gitlab.com/groups/gitlab-org/-/epics/954) epic and the creation of the Memory Team. One of the early steps we took was to [enable Puma by default in the GDK](https://gitlab.com/gitlab-org/gitlab-development-kit/-/issues/490) to get metrics and feedback from the community and our customers while we worked to deploy on GitLab.com.\n\nThe ability to measure the improvements achieved by the Puma deployment was critical to determining whether we had achieved our goals of overall memory reduction. To capture these metrics we set up [two identical environments](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/62877) to test changes on a daily basis. This allowed us to quickly change the worker/thread ratio within Puma and quickly review the impact of those changes.\n\n### A roll out plan\n\nWe have multiple pre-production environments, and we followed a progression of deploying Puma through each of these stages (dev->ops->staging->canary->production). Within each stage we would deploy the changes to enable Puma and test them. 
Once we confirmed a successful deployment, we would measure and make configuration changes for optimal performance and memory reduction.\n\n### Issues and Tuning\n\nEarly on we determined that our usage of [ChronicDuration](https://gitlab.com/gitlab-org/gitlab/-/issues/31285) was not thread-safe. We ended up [forking the code](https://gitlab.com/gitlab-org/gitlab/-/issues/31285#note_215961555) and distributing our own [gitlab-chronic-duration](https://gitlab.com/gitlab-org/gitlab-chronic-duration) to solve our thread-safety issues.\n\nWe encountered only minor issues in the earlier environments, but once we deployed to Canary our infrastructure team reported some [unacceptable latency issues](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/7455#note_239070865). We spent a significant amount of time tuning [Puma](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/8334) for the optimal configuration of workers to threads. We also discovered some changes required to our [health-check endpoint](https://gitlab.com/gitlab-org/omnibus-gitlab/issues/4835) to ensure minimal to no downtime during upgrades.\n\n### Puma Upstream Patch\n\nAs we zeroed in on tuning GitLab.com with Puma, we discovered that capacity was not being evenly distributed. Puma capacity is calculated as `workers * threads`, so if you have 2 workers and 2 threads you have a capacity of 4. Since Puma uses round-robin scheduling for requests, and no other criteria, we saw evidence of some workers being saturated while others sat idle. The simple [fix](https://github.com/puma/puma/pull/2079/files) proposed by [Kamil Trzcinski](https://gitlab.com/ayufan) was to make Puma inject a minimal amount of latency between requests if the worker is already processing requests. This allows other workers (that are idle) to accept sockets much faster than a worker that is already processing other traffic.\n\nYou can read more details about the discovery and research [here](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/8334#note_247859173).\n\n## Our results\n\nOnce we deployed Puma to our entire web fleet, we observed a drop in memory usage from 1.28TB to approximately 800GB (approximately a 37% drop), while our request queuing, request duration, and CPU usage all remained roughly the same.\n\nMore details and graphs can be found [here](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/1684#note_291225063). \n\nPuma is now on by default for all GitLab customers in the [GitLab 13.0 release](/releases/2020/05/22/gitlab-13-0-released/).\n\n## What's next\n\nWe want to review our infrastructure needs! The efficiency gains brought about by deploying Puma will allow us to re-examine the memory needs of Rails nodes in production. \n\nAlso, Puma has enabled us to continue to pursue our efforts to enable [real time editing](https://gitlab.com/groups/gitlab-org/-/epics/52). 
\n\n**More about GitLab's infrastructure:**\n\n[How we scaled Sidekiq](/blog/scaling-our-use-of-sidekiq/)\n\n[Make your pipelines more flexible](/blog/directed-acyclic-graph/)\n\n[The inside scoop on the building of our Status Page](/blog/how-we-built-status-page-mvc/)\n\nCover image by [John Moeses Bauan](https://unsplash.com/@johnmoeses) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[915,749,704],{"slug":5145,"featured":6,"template":678},"migrating-to-puma-on-gitlab","content:en-us:blog:migrating-to-puma-on-gitlab.yml","Migrating To Puma On Gitlab","en-us/blog/migrating-to-puma-on-gitlab.yml","en-us/blog/migrating-to-puma-on-gitlab",{"_path":5151,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5152,"content":5158,"config":5164,"_id":5166,"_type":16,"title":5167,"_source":17,"_file":5168,"_stem":5169,"_extension":20},"/en-us/blog/secure-stage-for-appsec",{"title":5153,"description":5154,"ogTitle":5153,"ogDescription":5154,"noIndex":6,"ogImage":5155,"ogUrl":5156,"ogSiteName":692,"ogType":693,"canonicalUrls":5156,"schema":5157},"How GitLab's application security dashboard helps AppSec engineers","GitLab Security features help application security engineers collaborate more efficiently and better assess the security posture of the projects they oversee.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663482/Blog/Hero%20Images/ralph-kayden-4Cg5T03B_8s-unsplash.jpg","https://about.gitlab.com/blog/secure-stage-for-appsec","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab's application security dashboard helps AppSec engineers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2020-07-07\",\n      }",{"title":5153,"description":5154,"authors":5159,"heroImage":5155,"date":5161,"body":5162,"category":14,"tags":5163},[5160],"Fernando Diaz","2020-07-07","\n[Application Security (AppSec)](/topics/devsecops/) engineers focus on enhancing an application's security by\nfinding, resolving, and preventing vulnerabilities. But managing all these\nvulnerabilities across different teams and projects is not an easy process. It\ncan be simplified by using the [GitLab Secure](/stages-devops-lifecycle/secure/)\nfeatures found in [GitLab Ultimate](/pricing/ultimate/).\n\nOne of the significant capabilities of GitLab Secure is the accurate, automated, and continuous assessment of the\nsecurity of your applications and services through a unified dashboard.\n\nIn this blog post, I will show four ways GitLab Secure makes life easier for the AppSec\nengineer.\n\n---\n\n## Finding vulnerabilities with security scans\n\nThe first capability that AppSec engineers will find useful is the robust set of security scanning capabilities in [GitLab Ultimate](/pricing/ultimate/).\n\nThese capabilities allow you to proactively identify vulnerabilities and weaknesses to minimize your security risk\nusing a variety of defense-in-depth techniques. 
The security scans include the following:\n\n* [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)\n* [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/)\n* [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\n* [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n* [License Scanning](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html)\n\n![pipeline with security scans](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/pipeline.png)\nGitLab pipeline running security scans\n{: .note.text-center}\n\nSimply add a [template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Security)\nto your [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/#includetemplate) or enable [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) to set up the scans.\n\nWhen submitting a merge request (MR), the security scans will run and populate the MR with\ndata on the vulnerabilities detected and how to resolve them. This data allows AppSec engineers\nto begin risk analysis and remediation.\n\n## Managing vulnerabilities with the Security Dashboard\n\nThe second capability that AppSec engineers will find useful is the [Security Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/), which helps\nkeep projects organized and summarizes the relevant security details for an application, all in one place.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/t-3TSlChHy4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe Security Dashboard in [GitLab Ultimate](/pricing/ultimate/) provides a high-level overview of the status of all the vulnerabilities\ndetected in groups, projects, and pipelines.\n\n![security dashboard with group view](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-group.png)\nSecurity Dashboard group-level view\n{: .note.text-center}\n\nBy using the Security Dashboard, an AppSec engineer can drill down into each\nvulnerability to obtain additional information, such as how to resolve the vulnerability,\nhow it was handled by the developer, and whether a work ticket (or GitLab issue) has been opened\nfor remediation.\n\nThe Security Dashboard also shows which file the vulnerability was detected in. Each vulnerability\nis assigned a severity and a report type. By using this information, an AppSec engineer\ncan quickly identify which items are the most critical for the team to tackle first.\n\n![security dashboard with project view](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-project.png)\nSecurity Dashboard project-level view\n{: .note.text-center}\n\nIt's important to note that the Security Dashboard supports integrations with third-party scanners.\nFor example, if you are using [WhiteSource](https://www.whitesourcesoftware.com/gitlab/), the\nscan results can be added to and managed in the Security Dashboard.\n\n## Auditing with the Security Dashboard\n\nA third capability GitLab Secure offers AppSec engineers is auditing. The engineer can use this capability to audit\na project or group based on the vulnerabilities revealed in various tests. 
By using the Security Dashboard,\nthe AppSec engineer can see which vulnerabilities have been dismissed, the developer who dismissed them, as\nwell as the reason why they were dismissed. This is helpful for several reasons:\n\n* Check to make sure the development team is practicing secure coding\n* Confirm there are no malicious actors dismissing issues\n* Keep track of the status of vulnerabilities which could not be immediately resolved\n\n![security dashboard vulnerability info](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-info.png)\nVulnerability info displayed in Security Dashboard\n{: .note.text-center}\n\nAn AppSec engineer can also track and create [confidential issues](https://docs.gitlab.com/ee/user/project/issues/confidential_issues.html) from the\nSecurity Dashboard. With confidential issues, a team can keep track of the status\nof a vulnerability in private and make sure it stays on track to be resolved. A\nconfidential branch can be created along with the issue, so that the development team\ncan work on a resolution without tipping off malicious actors.\n\n![security dashboard confidential issue creation](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-issue-creation.png)\nConfidential issues created via Security Dashboard\n{: .note.text-center}\n\n## Managing software licenses\n\nThe final capability we recommend AppSec engineers use is our license management.\n\nTypically, developers will use a variety of open source dependencies instead of reinventing the wheel.\nThere is a problem though: using a dependency with a restrictive license can put your entire application at legal risk.\n\nAn AppSec engineer is able to add a policy to mark licenses as acceptable or unacceptable for a project and its dependencies.\nIf an unacceptable license is found, the MR can be blocked. The video below provides\nan overview:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/42f9LiP5J_4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nGitLab Secure capabilities enhance the efficiency of AppSec engineers, ultimately\nleading to the production of more secure applications and a more security-empowered\ndevelopment team. Learn more at [DevSecOps](/solutions/security-compliance/) and\ncheck out the [GitLab Secure direction page](/direction/secure/) for more\ninformation on upcoming features and integrations.\n\n### Level up your DevSecOps knowledge:\n\n  [GitLab's security tools and the HIPAA risk analysis](https://about.gitlab.com/blog/gitlab-security-tools-and-the-hipaa-risk-analysis/)\n  [A deep dive into the Security Analyst persona](https://about.gitlab.com/blog/a-deep-dive-into-the-security-analyst-persona/)\n  [Compliance made easy with GitLab](https://about.gitlab.com/blog/compliance-made-easy/)\n\nCover image by [Ralph Kayden](https://unsplash.com/@ralphkayden) on [Unsplash](https://unsplash.com/photos/4Cg5T03B_8s)\n{: .note}\n\n## Learn more about DevSecOps\n\n- [Efficient DevSecOps: 9 tips for shifting left](/blog/efficient-devsecops-nine-tips-shift-left/)\n- [Want better DevSecOps? 
Try cross-functional collaboration](/blog/achieve-devsecops-collaboration/)\n- [Compliance made easy with GitLab](/blog/compliance-made-easy/)\n\n\u003C%= partial \"includes/blog/blog-merch-banner\" %>\n",[1307,894,727,1646],{"slug":5165,"featured":6,"template":678},"secure-stage-for-appsec","content:en-us:blog:secure-stage-for-appsec.yml","Secure Stage For Appsec","en-us/blog/secure-stage-for-appsec.yml","en-us/blog/secure-stage-for-appsec",{"_path":5171,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5172,"content":5178,"config":5183,"_id":5185,"_type":16,"title":5186,"_source":17,"_file":5187,"_stem":5188,"_extension":20},"/en-us/blog/beginner-guide-ci-cd",{"title":5173,"description":5174,"ogTitle":5173,"ogDescription":5174,"noIndex":6,"ogImage":5175,"ogUrl":5176,"ogSiteName":692,"ogType":693,"canonicalUrls":5176,"schema":5177},"GitLab’s guide to CI/CD for beginners","CI/CD is a key part of the DevOps journey. Here’s everything you need to understand about this game-changing process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681391/Blog/Hero%20Images/beginnercicd.jpg","https://about.gitlab.com/blog/beginner-guide-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s guide to CI/CD for beginners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-06\",\n      }",{"title":5173,"description":5174,"authors":5179,"heroImage":5175,"date":5180,"body":5181,"category":14,"tags":5182},[2150],"2020-07-06","\n\nContinuous integration and [continuous delivery/deployment](/topics/continuous-delivery/) (most often referred to as CI/CD) are the cornerstones of [DevOps](/topics/devops/) and any modern software development practice. Here’s everything you need to know about [CI/CD for beginners](/blog/how-to-keep-up-with-ci-cd-best-practices/).\n\n## What CI/CD means\n\nIf your software development process involves a lot of stopping, starting and handoffs, [CI/CD](/topics/ci-cd/) may be just what you’re looking for. A CI/CD pipeline is a seamless way for developers to make changes to code that are then automatically tested and pushed out for delivery and deployment. The goal is to eliminate downtime. Get CI/CD right and you’re well on the road to successful DevOps and dramatically faster code release. In our [2020 Global DevSecOps Survey](/blog/devsecops-survey-released/), nearly 83% of survey takers said they’re getting code out the door more quickly thanks to DevOps.\n\n## Understand CI/CD basics\n\nIf you’re not sure what a pipeline is, or how the entire process works, here’s a [detailed explanation](/blog/a-beginners-guide-to-continuous-integration/) of how all the moving parts work together to make software development quicker and easier.\n\n## Four benefits of CI/CD\n\nYes, CI/CD helps speed up delivery of code but it also makes for happier software developers. At a time when there continues to be [a worldwide shortage of software developers](https://www.gartner.com/en/newsroom/press-releases/2019-01-17-gartner-survey-shows-global-talent-shortage-is-now-the-top-emerging-risk-facing-organizations), it’s critical to retain technical talent. 
Developer job satisfaction is just one of [four key benefits](/blog/positive-outcomes-ci-cd/) that come from implementing a CI/CD process.\n\n## How to pick the right CI/CD tool\n\nNow that you’re sold on the [benefits of CI/CD](/topics/ci-cd/benefits-continuous-integration/), it’s time to choose a tool. There are a number of considerations, from [budget to room for growth](/topics/ci-cd/choose-continuous-integration-tool/), so it’s worth taking the time to think it through.\n\n## How to make the business case for CI/CD\n\nTying a CI/CD process to ROI isn’t difficult, but it’s an important step to take to get management buy-in. Here are [three factors to consider](/blog/modernize-your-ci-cd/) – including the hidden cost of toolchain sprawl – as you make the case for CI/CD.\n\n## Take 20 minutes and build a CI/CD pipeline\n\nOk, enough talking about theoreticals... it’s time to do something. Using GitLab’s [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) functionality, you can [move from code to production](/blog/building-a-cicd-pipeline-in-20-mins/) in just two simple steps and in only 20 minutes (no, really, just 20 minutes).\n\n## Next stop: Kubernetes!\n\nFinally, you can tie your GitLab CI pipeline into Google Kubernetes Engine (GKE), and as a bonus it takes only 15 minutes. Our [step-by-step tutorial](/blog/gitlab-ci-on-google-kubernetes-engine/) is completely beginner-friendly.\n\n**Level up your CI/CD knowledge:**\n\n[How CI can put the \"Sec\" in DevSecOps](/blog/solve-devsecops-challenges-with-gitlab-ci-cd/)\n\n[Autoscale GitLab CI with AWS Fargate](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/)\n\n[Get started with parent-child pipelines](/blog/parent-child-pipelines/)\n\nCover image by [Kyle Glenn](https://unsplash.com/@kylejglenn) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[110,894,1002],{"slug":5184,"featured":6,"template":678},"beginner-guide-ci-cd","content:en-us:blog:beginner-guide-ci-cd.yml","Beginner Guide Ci Cd","en-us/blog/beginner-guide-ci-cd.yml","en-us/blog/beginner-guide-ci-cd",{"_path":5190,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5191,"content":5196,"config":5202,"_id":5204,"_type":16,"title":5205,"_source":17,"_file":5206,"_stem":5207,"_extension":20},"/en-us/blog/scaling-our-use-of-sidekiq",{"title":5192,"description":5193,"ogTitle":5192,"ogDescription":5193,"noIndex":6,"ogImage":3459,"ogUrl":5194,"ogSiteName":692,"ogType":693,"canonicalUrls":5194,"schema":5195},"How we scaled async workload processing at GitLab.com using Sidekiq","Sidekiq was a great tool for async processing until it couldn't keep up. Here's how we made it scale.","https://about.gitlab.com/blog/scaling-our-use-of-sidekiq","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we scaled async workload processing at GitLab.com using Sidekiq\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rachel Nienaber\"}],\n        \"datePublished\": \"2020-06-24\",\n      }",{"title":5192,"description":5193,"authors":5197,"heroImage":3459,"date":5199,"body":5200,"category":14,"tags":5201},[5198],"Rachel Nienaber","2020-06-24","## Sidekiq at GitLab\n\nGitLab is a Ruby-on-Rails application that processes a lot of data. Much of this processing can be done asynchronously,\nand one of the solutions we use to accomplish this is [Sidekiq](https://github.com/mperham/sidekiq/wiki), a background-processing\nframework for Ruby. It handles jobs that are better processed asynchronously, outside the web request/response cycle. 
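\n\nAs a minimal illustration (this worker is hypothetical, not from the GitLab codebase), a Sidekiq worker class and the call that enqueues it look roughly like this:\n\n```ruby\n# Hypothetical worker -- illustrative only.\nclass WelcomeEmailWorker\n  include Sidekiq::Worker\n\n  # Runs later on a Sidekiq worker thread, outside the request cycle.\n  def perform(user_id)\n    UserMailer.welcome_email(user_id).deliver_now # hypothetical mailer\n  end\nend\n\n# In the web request: enqueue the job and return immediately.\nWelcomeEmailWorker.perform_async(42)\n```\n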
\n\nThere are a few terms that we'll use in this post:\n\n* A **worker class** is a class defined in our application to process a task in Sidekiq.\n* A **job** is an instance of a worker class, so each job represents a single task.\n* A **queue** is a collection of jobs (potentially for different worker classes) that are waiting to be processed.\n* A **worker thread** is a thread processing jobs in particular queues. Each Sidekiq process can have multiple worker threads.\n\nThen there are two terms specific to GitLab.com:\n\n* A **Sidekiq role** is a configuration for a particular group of queues. For instance, we might have a `push_actions` role that is for processing the `post_receive` and `process_commit` queues.\n* A **Sidekiq node** is an instance of the GitLab application for a Sidekiq role. A Sidekiq node can have multiple Sidekiq processes.\n\nBack in 2013, in version 6.3 of GitLab, every Sidekiq worker class had its own queue. We weren't strict in monitoring the creation of\nnew worker classes, and there was no strategic plan for assigning queues to where they would execute.\n\nIn 2016, we tried to introduce order again, and rearranged the queues to be based on features. We followed this with a change in\n2017 to have a dedicated queue for each worker class again, so that we could monitor queues more accurately and impose specific\nthrottles and limits on each. It was easy to quickly make decisions about the queues as they were running because of how\nthe work was distributed. The queues were grouped, and the names of these groups were `realtime`, `asap`, and `besteffort`, for example.\n\nAt the time, we knew that this was not the approach recommended by the author of Sidekiq, Mike Perham, but we felt that we knew what\nthe trade-offs were. In fact, Mike wrote: \n\n> “I don't recommend having more than a handful of queues. Lots of queues makes for a more complex\n> system [and Sidekiq Pro cannot reliably](https://github.com/antirez/redis/issues/1785) handle multiple queues without\n> polling. 
M Sidekiq Pro processes polling N queues means O(M*N) operations per second slamming Redis.”\n\nFrom [https://github.com/mperham/sidekiq/wiki/Advanced-Options#queues](https://github.com/mperham/sidekiq/wiki/Advanced-Options#queues)\n\nThis served us well for nearly two years before the approach no longer matched our scaling needs.\n\n### Pressure from availability issues\n\nIn mid-2019, GitLab.com experienced several major incidents related to the way we\nprocess background queues.\n\nExamples of these incidents:\n- [Gitaly n+1 calls caused bad latency and resulted in the Sidekiq queues growing](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/7479).\nThis was due to the way we processed tags in Gitaly.\n- A user generated many notes on a single commit, which [slowed down the new_note Sidekiq queue](https://gitlab.com/gitlab-com/gl-infra/production/issues/1028)\nand led to a delay in sending out notifications.\n- CI jobs took very long to complete because [jobs in the pipeline_processing:pipeline_process Sidekiq queue piled up](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/7402).\nTwo pipelines caused a high number of Sidekiq jobs, Sidekiq pipeline nodes were maxing out their CPU, pipeline_processing\njobs were causing many SQL calls, and the pgbouncer pool for Sidekiq was becoming saturated.\n\nAll of these showed that we needed to take action.\n\n![Sidekiq throughput per job](https://about.gitlab.com/images/blogimages/sidekiq_throughput_per_job.png){: .shadow}\n\nThis image shows how many jobs we process per second over a 24-hour period, illustrating the variety of jobs and\ngiving an idea of their scale in relation to each other.\n\n### Improvements\n\n#### Changing the relationship between jobs and Sidekiq roles\n\nIn [infrastructure#7219 (closed)](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/7219) we significantly\naltered our approach to how jobs were related to Sidekiq roles.\n\nWe started from a position where:\n1. We had a many-to-many relationship between Sidekiq jobs and Sidekiq roles.\n   1. For example, most pipeline jobs ran on the `besteffort` nodes, but some ran on the pipeline nodes.\n   1. Some jobs ran on up to three types of node: e.g. the `realtime`, `asap` and `besteffort` priorities.\n1. Worker threads were reserved for single queues.\n   1. For example, one eighth of the `realtime` queue might be reserved for new_note jobs. In the event of a glut of\n  new_note jobs, most of the fleet would sit idle while one worker thread would be saturated. Worse, adding more nodes would\n  only increase processing power by 1/8th of a node, not the full compute capacity of the new node.\n1. Urgent and non-urgent jobs would be in the same queue.\n   1. For example, some jobs in the `realtime` queue would take up to 10 minutes to process.\n   1. This is a bit like allowing overloaded trolleys in the 10-items-or-less lane.\n\nOnce the issue was completed, we had:\n1. A one-to-one relationship between Sidekiq jobs and Sidekiq roles.\n   1. Each job will execute on exactly one Sidekiq role.\n1. All worker threads running all jobs, with each Sidekiq node having the same number of worker threads.\n   1. When a glut of jobs comes in, 100% of compute on a node can be dedicated to executing the jobs.\n1. Slow jobs and fast jobs kept apart.\n   1. The 10-items-or-less lane is now being enforced.\n\nWhile this was a significant improvement, it introduced some technical debt. 
We fixed everything for a moment in time,\nknowing that as soon as the application changed this would be out of date, and as time went on, it would only get more out\nof date until we were back in the same position. To try and mitigate this in the future, we started to look at classifying\nthe workloads and using queue selectors.\n\n#### Queue Selectors Deployed in Sidekiq Cluster\n\nIn the\n[Background Processing Improvements Epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/96), we looked at ways\nthat we could simplify the structure so that background processing could be in a position to scale to 100x the traffic\nat the time. We also needed the processing to be unsurprising: operators (and developers) should understand where a job\nwill run, why it is queueing up, and how to reduce queues. We decided to move to using [queue selectors](https://docs.gitlab.com/ee/administration/sidekiq/extra_sidekiq_processes.html)\nto help us keep the queue definitions correct. (This approach is still experimental.)\n\nIn addition, the infrastructure team should not reactively (and manually) route Sidekiq jobs to priority fleets, as\nwas previously the situation. Developers should have the ability to specify the requirements of their workloads and\nhave these automatically processed on a queue designed to support that type of work.\n\nSidekiq processes can be configured to select specific queues for processing. Instead of making this selection by name,\nwe wanted to make the selection based on how the workload for that queue was classified.\n\nWe came up with an approach for classifying background jobs by their workload and building a sustainable way of grouping\nsimilar workloads together.\n\nWhen a new job is created, developers need to classify its workload, as shown in the sketch below. This is done through:\n- Specifying the [urgency of the job](https://docs.gitlab.com/ee/development/sidekiq/index.html). The options\nare `high`, `low` and `none`. If the delay of a job would have user impact, then the job is `high` urgency.\n- Noting if the [job has external dependencies](https://docs.gitlab.com/ee/development/sidekiq/index.html)\nthat could impact its availability. (For example, if it communicates with user-specified Kubernetes clusters.)\n- Adding an [annotation declaring if the worker class will be cpu-bound or memory-bound](https://docs.gitlab.com/ee/development/sidekiq/index.html). Knowing\nthis allows us to make decisions around how much thread concurrency a Ruby process can tolerate, or targeting memory-bound\njobs to low-concurrency, high-memory nodes.\n\nThere is additional guidance available to [determine if the worker class should be marked as cpu-bound](https://docs.gitlab.com/ee/development/sidekiq/index.html).
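\n\nAs a rough sketch of what this classification looks like in a worker class (the worker name and body here are hypothetical, but the attributes follow the development docs linked above):\n\n```ruby\n# Hypothetical worker class illustrating the classification attributes.\nclass SomeNewWorker\n  include ApplicationWorker # GitLab's Sidekiq worker mixin\n\n  urgency :low                  # :high, :low, or :none\n  worker_resource_boundary :cpu # or :memory; informs concurrency and node targeting\n  # worker_has_external_dependencies! # declare if the job calls external services\n\n  def perform(project_id)\n    # ... the actual task ...\n  end\nend\n```\n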
\n\n#### SLAs are based on these attributes\n\n1. High urgency jobs should not queue for more than 10 seconds.\n1. High urgency jobs should not take more than 10 seconds to execute (this SLA is the responsibility of the owning team, to ensure that high throughput is maintained).\n1. Low urgency jobs should not queue for more than 1 minute.\n1. Jobs without urgency have no queue SLA.\n1. Non-high urgency jobs should not take more than 5 minutes to execute.\n\nIn each case, the queuing SLAs are the responsibility of the infrastructure team, as they need to ensure that the fleet is\ncorrectly provisioned to meet the SLA.\n\nThe execution latency SLAs are the responsibility of the development team owning the worker class, as they need to ensure that the\nworker class is sufficiently performant to ensure throughput.\n\n![Sidekiq certain queues spike](https://about.gitlab.com/images/blogimages/sidekiq_authorized_projects.png){: .shadow}\n\nThis image shows the challenges we faced by having jobs of different urgency running on the same queue.\nThe purple lines show spikes from one particular worker, where many jobs were added to the queue,\ncausing delays to other jobs which were often of equal or higher importance.\n\n### Challenge during rollout - BRPOP\n\nAs the number of background queues in the GitLab application grows, this approach continues to burden our Sidekiq Redis\nservers. On GitLab.com, our `catchall` Sidekiq nodes monitor about 200 queues, and the Redis [BRPOP](https://redis.io/commands/brpop)\ncommands used to monitor the queues consume a significant amount of time (by Redis latency standards).\n\nThe number of clients listening made this problem worse. For `besteffort` we had 7 nodes, each running 8 processes,\nwith 15 threads watching those queues - meaning 840 clients.\n\nThe command causing the problem was BRPOP. The time taken to perform this command relates\nto the number of listeners on those keys. The addition of multiple keys increases contention in the system, which causes\nlots of connections to block, and when the key list is longer the problem gets worse. The key list represents the number of\nqueues: the more queues we have, the more keys we are listening to. We saw this problem on the nodes that process the most queues.\n\nWe raised an issue in the Redis issue tracker about the [performance we observed when many clients performed BRPOP on the\nsame key](https://github.com/antirez/redis/issues/7071). It was fantastic when [Salvatore](https://github.com/antirez)\nresponded within the hour and the patch was available the same day! This fix was made in Redis 6 and backported to Redis 5.\n[Omnibus has also been upgraded to use this fix](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4126), and it will\nbe available in the major release 13.0.
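\n\nTo make the pattern concrete, here is a rough redis-rb sketch of the kind of blocking poll described above (illustrative only, not Sidekiq's actual implementation; `process_job` is a hypothetical handler):\n\n```ruby\n# Each worker thread blocks on every queue it watches, so M processes\n# polling N queues means O(M*N) blocking commands hitting Redis.\nrequire 'redis'\n\nredis = Redis.new\nqueues = Array.new(200) { |i| \"queue:q#{i}\" } # ~200 queues, as on our catchall nodes\n\nloop do\n  queue, job = redis.brpop(*queues, timeout: 2) # block until any queue has a job\n  process_job(queue, job) if job                # hypothetical handler\nend\n```\n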
\n\n### Current State (as of June 2020)\n\nMigrating to these new selectors has been completed as of late April 2020.\n\nWe reduced our Sidekiq fleet from 49 nodes with 314 CPUs to 26 nodes with 158 CPUs. This has also reduced our cost.\nThe average utilization is more evenly spread across the new fleets.\n\nAlso, we have [moved Sidekiq-cluster to Core](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181). Previously, running\nSidekiq in clustered mode (i.e. spawning more than one process) was\ntechnically only available as part of GitLab EE distributions, and for self-managed environments only in the Starter+ tiers.\nBecause of that, when booting Sidekiq up in a development environment with the GDK, the least common denominator was assumed,\nwhich was to run Sidekiq in a single-process setup. That can be a problem, because it means there is a divergence between\nthe environment developers work on and what will actually run in production (i.e. gitlab.com and higher-tier self-managed environments).\n\nIn [release 13.0](/releases/2020/06/22/gitlab-13-1-released/) Sidekiq Cluster is used by default.\n\nWe’re also better placed to migrate to Kubernetes. The selector approach is a lot more compatible with making good\ndecisions about things like CPU allocations + limits for Kubernetes workloads, and this will make the job of our delivery\nteam easier, leading to further cost reductions from auto-scaling deployed resources to match actual load.\n\nOur next piece of work with Sidekiq will be to [reduce the number of queues that we need to watch](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/194)\nand we will post a follow-up to this blog post when the work is completed.\n\n**Read more about infrastructure issues:**\n\n[Faster pipelines with DAG](/blog/directed-acyclic-graph/)\n\n[Keep Kubernetes runners moving](/blog/best-practices-for-kubernetes-runners/)\n\n[Understand parent-child pipelines](/blog/parent-child-pipelines/)\n\nCover image by [Jerry Zhang](https://unsplash.com/@z734923105) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[915,704,727],{"slug":5203,"featured":6,"template":678},"scaling-our-use-of-sidekiq","content:en-us:blog:scaling-our-use-of-sidekiq.yml","Scaling Our Use Of Sidekiq","en-us/blog/scaling-our-use-of-sidekiq.yml","en-us/blog/scaling-our-use-of-sidekiq",{"_path":5209,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5210,"content":5216,"config":5221,"_id":5223,"_type":16,"title":5224,"_source":17,"_file":5225,"_stem":5226,"_extension":20},"/en-us/blog/best-practices-for-kubernetes-runners",{"title":5211,"description":5212,"ogTitle":5211,"ogDescription":5212,"noIndex":6,"ogImage":5213,"ogUrl":5214,"ogSiteName":692,"ogType":693,"canonicalUrls":5214,"schema":5215},"Best practices to keep your Kubernetes runners moving","In a presentation at GitLab Commit San Francisco, a senior software engineer from F5 Networks shares some best practices for working with Kubernetes runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681341/Blog/Hero%20Images/trackandfield.jpg","https://about.gitlab.com/blog/best-practices-for-kubernetes-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Best practices to keep your Kubernetes runners moving\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-05-27\",\n      }",{"title":5211,"description":5212,"authors":5217,"heroImage":5213,"date":5218,"body":5219,"category":14,"tags":5220},[3676],"2020-05-27","\n\nSometimes in software engineering, you have to learn the hard way. GitLab CI is extremely powerful and flexible, but it’s also easy to make mistakes that could take out a GitLab runner, which can clog up Sidekiq and bring down your entire GitLab instance.\n\nLuckily, Sean Smith, senior software engineer for F5 Networks, has been through it, and he summarizes some of those learnings in [his talk at GitLab Commit San Francisco](https://www.youtube.com/watch?v=Hks5ElUxkP4). In the presentation, Sean goes in-depth about a past incident that clogged up F5 Networks' GitLab runner, and shares tips on setting limits for Kubernetes (K8s) runners.\n\nSean is a GitLab administrator for [F5 Networks](https://www.f5.com/), a company with about 1,800 users worldwide running 7,500 projects each month – excluding forks. That’s roughly 350,000 - 400,000 CI jobs going through the K8s runners each month. 
Until some recent hires, there were only three engineers to handle it all.\n\nInstead of running a giant GitLab instance on one VM, F5 broke up their instance into seven different servers: two HA web servers, one Postgres server, a Postgres replica, Sidekiq, Gitaly (our Git filesystem), and Redis.\n\n## Keep your GitLab runners up and moving\n\nF5 uses two types of GitLab runners:\n\n*   Kubernetes: About 90% of F5 jobs go through K8s\n*   Docker: Docker machine is run on-prem and in the cloud\n\n**Why use Docker?** F5 uses Docker to configure cluster networks in different jobs as well as for unit testing. Since the Docker machine can run on-prem and also in the cloud, it’s easy to have a VM dedicated to the job that allows you to manage those Docker images and Docker containers and set up your cluster networking topology within Docker, so you can run your tests and tear everything down afterward without affecting other users. This isn’t something that is really possible with Kubernetes runners.\n\nOtherwise, F5 Networks uses Kubernetes, but keeping your K8s runners up and running isn’t necessarily foolproof.\n\n### CI jobs can spawn\n\nSometimes, a seemingly benign coding error can create unanticipated consequences for your Kubernetes runners.\n\nOne time, an F5 engineer decided to use a GitLab CI job to automatically configure different settings on various jobs and projects. It made sense to configure using GitLab CI because the engineer wanted to be able to use [Git for version control](/topics/version-control/). Version control makes it easier for the team to iterate on the code transparently. He wrote the code to run the job.\n\nBut he didn’t read the fine print in the library he was using. The code he wrote looked for the project ID and, if it found the project ID, ran the pipeline once per hour at the 30-minute mark. The assumption was that if there was already a matching scheduled task, the create function would not create a duplicate. Unfortunately, this was not the case. The code he ran caused the number of CI jobs to grow exponentially.\n\n![The code that clogged the K8s runner with GitLab CI jobs for F5 Networks](https://about.gitlab.com/images/blogimages/problemcode.png){: .shadow}\nThe code that clogged the K8s runner with GitLab CI jobs for F5 Networks. Can you see the problem yet?\n{: .note.text-center}\n\n\"You schedule a job, then next you schedule another job so now you've got two jobs scheduled, and then you've got four jobs scheduled, and then eight, after 10 iterations, you get around the 1,024 jobs scheduled and after 1,532,000 jobs, if this was allowed to run for 24 hours, you would end up with 16.7 million jobs being scheduled by the 24th hour,\" says Sean.\n\nIn short: chaos. Remember, F5 Networks has a CI pipeline capacity of 350,000 to 400,000 jobs per month, so 16.7 million jobs in 24 hours could easily clog the system, taking down the K8s nodes as well as the GitLab nodes.\n\nLuckily, there’s a simple enough fix. First, identify which project is causing the problem, and disable CI on the project so it can’t create any new jobs. Next, kill all the pending jobs by [running this snippet](https://gitlab.com/snippets/1924269).\n\n```\n# Run inside the Rails console: gitlab-rails console\n# Find the offending project, then cancel all of its pending pipelines.\np = Project.find_by_full_path('rogue-group/rogue-project')\nCi::Pipeline.where(project_id: p.id).where(status: 'pending').each { |pipeline| pipeline.cancel }\nexit\n```\n\nIt’s really a judgment call whether to kill a running job or not. If a job is currently running and is going to take all of 30 seconds, then maybe don’t bother killing it, but if the job is going to take 30 minutes, then consider killing it to free up resources for your users.\n\nF5 learned a lesson here and set up a monitoring alert to help ensure the job queue doesn’t back up like that again. The cron job checks to make sure F5 is not exceeding a pre-established threshold on the number of jobs in a pending state. The alert links to a dashboard and also includes the full playbook for how to resolve the problem (because let’s face it, nobody is at their best when troubleshooting bleary-eyed at 3 a.m.). At first there were some false positives, but now the alerting has been fine-tuned and the system has saved F5 from two outages so far.
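\n\nThe snippet below sketches the idea behind that kind of check from the Rails console, mirroring the cancellation snippet above (the threshold value and the alert wiring are hypothetical; F5's actual check runs as a scheduled job against their monitoring stack):\n\n```ruby\n# gitlab-rails console sketch -- hypothetical threshold, illustrative only.\nTHRESHOLD = 5_000\npending = Ci::Pipeline.where(status: 'pending').count\nif pending > THRESHOLD\n  # In a real alert this would page on-call and link the playbook.\n  puts \"ALERT: #{pending} pending pipelines exceeds threshold of #{THRESHOLD}\"\nend\n```\n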
\n\n### Push it to the limit\n\nThe fact is, nobody has an unlimited cloud budget, and if you're on-prem, resources are even more constrained because users rely on fixed hardware. Sean says that F5 soon realized that, to meet the needs of all users, sensible limits had to be established so one or two mega-users didn't devour all their resources. He has some tips on how to set limits in your Kubernetes and GitLab runners.\n\nWhile some users may be disgruntled that cloud limits exist and are enforced, the best approach is to keep an open dialogue with users about the limits while recognizing that projects expand and grow over time.\n\nFortunately, you can set the limits yourself and don’t have to rely on the goodwill of your users to conserve CPU. Kubernetes allows limits by default, and GitLab supports K8s requests and limits. The K8s scheduler uses requests to determine which nodes to run the workload on. Limits will kill a job if it exceeds the predefined limit. Requests and limits can differ, but if requests aren’t specified and limits are, the scheduler will use the limits to determine the request value.\n\n[Take a peek at the limits F5 configured for their Kubernetes GitLab runner](https://gitlab.com/snippets/1926912).\n\n```toml\nconcurrent = 200\nlog_format = \"json\"\n[[runners]]\n  name = \"Kubernetes Gitlab Runner\"\n  url = \"https://gitlab.example.com/ci\"\n  token = \"insert token here\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    namespace = \"gitlab-runner\"\n    service-account = \"gitlab-runner-user\"\n    pull_policy = \"always\"\n\n    # build container\n    cpu_limit = \"2\"\n    memory_limit = \"6Gi\"\n\n    # service containers\n    service_cpu_limit = \"1\"\n    service_memory_limit = \"1Gi\"\n\n    # helper container\n    helper_cpu_limit = \"1\"\n    helper_memory_limit = \"1Gi\"\n```\n\n\"We have got concurrency of 200 jobs, so it will at max spawn 200 jobs, and you'll see that we are limiting the CPU use on the build container to two and memory to six gigabytes, and on the helper and service CPU and memory limits, we have one CPU and one gig of memory each,\" says Sean. \"And so it gives you that flexibility to break it out because generally, you don't necessarily need as much CPU or as much memory on a service that you're spinning up in your CI job.\"\n\n## What comes first: Setting up Kubernetes runners or establishing limits?\n\n[DevOps](/topics/devops/) is a data-driven practice, so the idea of setting limits to conserve resources without any underlying data about what users are doing can seem counterintuitive. 
If you’re migrating to Kubernetes runners from a Docker runner or a shell runner, it’s easy enough to extrapolate from the numbers you already have to establish limits as you set up your Kubernetes runners.\n\nIf you’re brand-new to GitLab and GitLab CI, then it’s kind of a shot in the dark. Think about your bills and resource constraints: How much memory and CPU is available? Is anything else running on your K8s cluster? Chances are, your guesses will be incorrect – but that’s OK.\n\nIt might sound obvious, but if you’re running a hosted application on the same K8s cluster as your GitLab CI jobs, don’t set limits based on the capacity of a full K8s cluster. Ideally, you’d have a separate K8s cluster for GitLab CI jobs, but that isn’t always possible.\n\n### How F5 Networks did it\n\nF5 Networks started with a small team of roughly 50 people and maybe 100 projects in GitLab – so setting limits on K8s wasn’t a major concern until the company and, as a result, its projects started to grow.\n\nOnce it came time to set limits on their preexisting K8s runners, the first step was to enable the K8s metrics server to monitor how their users consume resources. The next step was to determine what users are doing. Sean recommends extracting the data from the K8s metrics server and displaying it on a dashboard using a tool like Grafana or Prometheus, which has a native integration within GitLab (although F5 used a tool called K9).\n\n## Some more tips for Kubernetes runners\n\n### Cutting them off: Enforcing limits\n\nOnce a user hits their limit, most of the time the end result is that their job gets killed. Usually the user will notice a mistake, go in, and fix their code, but most likely they will just ask for more resources.\n\nThe best way to determine whether or not to allocate more of your finite resources to a user is to determine need, Sean explains. Ask the user to return to you with concrete numbers about the amount of RAM or CPU they require. But if you don’t have the resources, then don’t overextend yourself to the detriment of your other users.\n\n### Use labels to reveal more data\n\n[Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) make it easier to identify workloads in Kubernetes, and they can be expanded from environment variables within GitLab, for example, job = \"$CI_JOB_ID\" and project = \"$CI_PROJECT_ID\". Labels can be used by admins who are manually running kubectl commands against K8s, or they can be used in reporting tools like Prometheus or Grafana for setting limits. But labels are the most valuable when it comes to debugging.\n\nBear in mind, labels are finicky in Kubernetes. [There are certain characters (stay away from \"?\") that can cause jobs to fail](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4565), and there is a 63-character limit on labels. If there is an unsupported character or the label is too long, the job won’t start. There won’t be a really good indication as to why your job wouldn’t start either, which can be a pain for troubleshooting. 
[Bookmark this page to learn more about labels in Kubernetes](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) (including its limitations).\n\nGitLab users that run on K8s need to be cautious not to overburden the runner with GitLab CI jobs, and ought to consider setting limits on CPU to conserve valuable resources.\n\nWant to learn more about how F5 manages their Kubernetes runners on their GitLab instance? Watch Sean's presentation at GitLab Commit San Francisco in the video below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Hks5ElUxkP4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Learn more\n\n* [Read on](/solutions/kubernetes/) to learn more about how GitLab and Kubernetes work together, and explore our plans for future integration with Kubernetes.\n\n* Explore the official documentation on [Kubernetes executor](https://docs.gitlab.com/runner/executors/kubernetes.html), which covers everything from choosing options in your configuration file to giving GitLab Runner access to the Kubernetes API, environment variables, volumes, helper containers, security context, privileged mode, secret volume, and removing old runner pods.\n\nCover Photo by [Kolleen Gladden](https://unsplash.com/@rockthechaos?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/track-and-field?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note.text-center}\n",[1002,110,4440],{"slug":5222,"featured":6,"template":678},"best-practices-for-kubernetes-runners","content:en-us:blog:best-practices-for-kubernetes-runners.yml","Best Practices For Kubernetes Runners","en-us/blog/best-practices-for-kubernetes-runners.yml","en-us/blog/best-practices-for-kubernetes-runners",{"_path":5228,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5229,"content":5235,"config":5241,"_id":5243,"_type":16,"title":5244,"_source":17,"_file":5245,"_stem":5246,"_extension":20},"/en-us/blog/creating-a-dark-ui-for-gitlabs-web-ide",{"title":5230,"description":5231,"ogTitle":5230,"ogDescription":5231,"noIndex":6,"ogImage":5232,"ogUrl":5233,"ogSiteName":692,"ogType":693,"canonicalUrls":5233,"schema":5234},"How we created a dark UI for GitLab's Web IDE","The Web IDE now has a Dark Mode, and we've put together a few learnings from a design perspective.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669611/Blog/Hero%20Images/ide-dark-post-banner.png","https://about.gitlab.com/blog/creating-a-dark-ui-for-gitlabs-web-ide","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we created a dark UI for GitLab's Web IDE\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcel van Remmerden\"},{\"@type\":\"Person\",\"name\":\"Jeremy Elder\"}],\n        \"datePublished\": \"2020-05-20\",\n      }",{"title":5230,"description":5231,"authors":5236,"heroImage":5232,"date":5237,"body":5238,"category":14,"tags":5239},[3405,3406],"2020-05-20","\n\nOne of the most popular and exciting feature requests we often hear about from our amazing community is a [dark mode for the entire GitLab UI](https://gitlab.com/gitlab-org/gitlab/-/issues/14531). 
It's currently the second most upvoted issue for all of GitLab.\n\nNext to being very popular in the design and development world, a dark mode can be incredibly helpful for users with vision impairments. One of our community members posted this comment, which demonstrates very well how valuable it can be to give users the chance to choose between a light and a dark mode:\n\n> It really comes down to website accessibility. I am legally blind and part of my eye condition is something called photophobia (which is poorly named—it's not a \"fear\" of light, it's that direct bright lights, especially sudden direct bright lights, are like having an ice pick shoved into my eyeballs.)\n\nAt GitLab, we believe in small changes and fast iterations. When our Design team was thinking about how we could split this up and tackle it in small steps, we looked for isolated pieces of our UI that we could create a dark mode for, and the feature that stood out was the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/#web-ide).\n\n## What is the Web IDE?\n\nThe Web IDE (Integrated Development Environment) is a code editor in the browser that allows you to change multiple files at once. Afterwards, you can commit your changes to a branch and create merge requests to discuss those changes and eventually merge them.\n\n![GitLab Web IDE](https://about.gitlab.com/images/blogimages/ide-dark-light-mode-browser.png){: .center}\nThe GitLab Web IDE\n{: .note.text-center}\n\nUsers of the Web IDE find it helpful for quickly making small changes or easily viewing their files in a familiar context, similar to their appearance in a local editor.\n\n### Syntax highlighting\n\nAfter deciding the Web IDE would be the first feature of the GitLab user interface (UI) to get a dark mode, we faced one fundamental question: How would the dark mode align with syntax highlighting themes already within GitLab? There are several themes that users may choose from to display their repository files, snippets, or other code elements in their preferred way.\n\n![User syntax settings](https://about.gitlab.com/images/blogimages/ide-dark-syntax.png){: .center}\nUser syntax highlighting theme settings\n{: .note.text-center}\n\nThe Web IDE exists as a tool within the larger context of GitLab. Similarly, the syntax themes exist within the context of the Web IDE. Our goal was to avoid scenarios where the code area that follows the syntax highlighting theme wouldn't be aligned with the rest of the UI, which could be jarring.\n\nWe made the decision to keep the settings easily consumable, and treat the dark mode for the Web IDE UI as an extension of the dark syntax highlighting theme. From version 13.0 on, you can enable it by selecting the dark syntax highlighting theme, and the rest of the Web IDE will automatically follow. This also gives us the opportunity to later extend other themes and align the rest of the Web IDE UI to their colors.\n\n## The design process\n\n### Light and dark UI vs. themes\n\nInitially, we defined a few concepts to help shape our approach. We refer to light and dark UI in terms of the qualities they have, like brightness, depth, structure, and hierarchy. 
In GitLab, themes are preferential styles that reside on the UI, and use color to change only the appearance of a few elements.\n\n![UI versus themes](https://about.gitlab.com/images/blogimages/ide-dark-ui-vs-themes.png){: .center}\nThe difference between the UI and themes in GitLab\n{: .note.text-center}\n\n### Working in Figma\n\n#### Figma community\n\nAs soon as we wanted to start experimenting with the UI, we noticed firsthand that \"Everyone can contribute\" is not only GitLab's core mission, but also an idea that is very much alive in the Figma community. The amazing designers at Microsoft have open-sourced a [design toolkit for Visual Studio Code](https://www.figma.com/community/file/786632241522687494) that allowed us to easily grab the relevant pieces, plug them into our own design file, and manipulate them.\n\n#### Asynchronous feedback\n\nAnother aspect that's deeply embedded in GitLab's ways of working and the way we build our products is asynchronous collaboration. We are the largest all-remote company in the world, and the two designers working on this feature are located in time zones seven hours apart.\n\nUsing Figma to collaborate and give each other feedback on our ideas enabled us to ship this feature after scheduling only a single meeting, with the rest of the discussions handled via Figma comments. As these discussions were between designers and purely around visual aspects, we kept the discussion inside of Figma instead of using our own [Design Management](https://docs.gitlab.com/ee/user/project/issues/design_management.html) features, which came into play later during the discussions with the engineer working on this feature. It also allowed us to easily involve a lot of other team members, and get comments from other designers all over the globe.\n\n![A comment thread in Figma](https://about.gitlab.com/images/blogimages/ide-dark-async-thread.png){: .center}\nAsync design feedback in Figma\n{: .note.text-center}\n\n### Design challenges\n\nThe overarching design challenge was, and continues to be, understanding how the appearance of elements changes as they appear in light vs. dark UI. Generally, structural, container-like UI elements decrease in brightness, but content works in the opposite direction and is sometimes nearly inverted. The fundamentals of light, shadow, and depth don't change, but the way the elements leverage them does. Similarly, the principles of content legibility, hierarchy, and contrast don't change, but the content itself changes to uphold those principles.\n\nIn the side-by-side example below, we've compared just a few UI elements to demonstrate how they could change between light and dark UI.\n\n![Comparing light and dark UI in the Web IDE](https://about.gitlab.com/images/blogimages/ide-dark-comparison.png){: .center}\nComparing light and dark UI in the Web IDE\n{: .note.text-center}\n\nWhen we map the changes in this small sample, patterns start to emerge. Elements like backgrounds evenly shift darker together to maintain the same sense of depth, while some text content nearly inverts, and the button almost stays the same.\n\n![Colors mapped between light and dark UI](https://about.gitlab.com/images/blogimages/ide-dark-mapping-fade.png){: .center}\nMapping element color in light and dark UI\n{: .note.text-center}\n\nAt face value, it can seem as though many elements are inverted, but that's an oversimplification that leads to an interface looking not quite right. 
Here's how we're thinking about a few of the specific design challenges we encountered.\n\n#### Stateful elements\n\nIn a light UI, we darken element states to increase contrast, and typically do the opposite in a dark UI. This wasn't the case for tabs and similar elements that have backgrounds more closely integrated into other sections of the UI. And while the borders on the buttons got lighter, the background didn't, because we needed to maintain text contrast.\n\n![Button and tab states in light and dark UI](https://about.gitlab.com/images/blogimages/ide-dark-states.png){: .center}\nComparing element states in light and dark UI\n{: .note.text-center}\n\nThis uncovers nuanced differences in the approach between dark and light UI, and we're still codifying those differences and establishing repeatable patterns. Needless to say, each element deserves plenty of attention.\n\n#### Visual hierarchy and depth\n\nAs mentioned above, depth in dark mode was generally approached in the same way as in a light UI. Brighter elements are more forward, and darker ones recede. In the case of tabs and the file tree, we are using a different approach and making these areas darker to increase contrast, rather than evenly darkening layers. We're learning that depth and contrast can both be effective tools, but they aren't always used the same way in dark and light UI.\n\nA quick note on shadows: they shouldn't be replaced with glows, which are a completely different effect. Shadows are noticeably less effective in dark mode, so we explored more variance in gray backgrounds for neighboring sections.\n\n#### Graphics and illustration\n\nGraphics created for a light UI can seem garish or out of place in a dark UI. Images should be addressed on a case-by-case basis, but illustrations and icons can be addressed as a whole. We're exploring CSS variables and classes for SVG fill and path colors. One example we had to solve was pipeline status icons. These exist in a couple of places in our product and initially had a white background. As this made them stand out too much in dark mode, we had to rewrite their SVG code to make them transparent instead.\n\n![Icons with and without background fill changes](https://about.gitlab.com/images/blogimages/ide-dark-pipeline-icons.png){: .center}\nEnsuring that graphics, like icons, can be adjusted too\n{: .note.text-center}\n\nWith that in place, we could map light and dark palettes. For now, we're just ensuring that there aren't backgrounds in SVGs that feel out of place.\n\n#### How to ship in small pieces\n\nOur philosophy is to release changes or features as soon as they can help users. This sometimes leads to us shipping features that are not completely polished, which is in line with this [famous quote by Reid Hoffman](https://twitter.com/reidhoffman/status/847142924240379904?lang=en), the co-founder of LinkedIn:\n\n> If you're not embarrassed by the first version of your product, you've launched too late.\n\nThe first version of this feature we released had only the code area styled with the dark syntax highlighting theme. Even though it felt a bit out of place, we received good feedback, which was evidence we were headed in the right direction.\n\n![MVC dark mode with light file tree](https://about.gitlab.com/images/blogimages/ide-dark-first-version.png){: .center}\nMVC dark mode with light file tree\n{: .note.text-center}\n\nFrom that point on, we sliced the remaining UI into smaller pieces. 
Every time we finished a piece, we released the newest version to all our users and started working on the next area. This highly iterative approach would not be acceptable in a lot of other companies, but at GitLab we believe in minimal viable changes ([MVC](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc)).\n\nAnother thing we learned was that a dark mode exposed not only structural UI deficiencies, but also inflexible code. Our initial intention was to leave a couple of seldom-visited areas unstyled, but we noticed that keeping CSS styles from bleeding over into these areas would cause more problems and effort than fixing them altogether.\n\n#### Effective prototyping\n\nAs demonstrated in the previous paragraphs, one of the toughest challenges when designing a dark mode is elements with multiple states. This is also one of the aspects designers are still struggling with when prototyping, which led to us tackling this problem in a few ways:\n\n- Creating a large prototype with many artboards to represent edge cases and states\n- Relying heavily on a well-defined color system\n- Holding multiple sync calls with an engineer to fix smaller aspects, e.g., animations, on the fly\n\nFor the next iteration of the prototype, we are going to investigate whether we can leverage Figma's components in a way that gives buttons the same hover/focus/active states across multiple artboards. We have set up a [first small test](https://www.figma.com/proto/SvimjjirW0pkn69TNBztU9/Button-state-example?node-id=1%3A3&scaling=min-zoom) to prove that it would be possible, but haven't used it on a more complex prototype yet.\n\n![Web IDE Figma prototype](https://about.gitlab.com/images/blogimages/ide-dark-prototype-lg.png){: .center}\nWeb IDE prototype in Figma to demonstrate states\n{: .note.text-center}\n\n## What we learned so far\n\n- Answering questions for dark mode leads to many questions about why we're doing things a certain way in a light UI. It creates a great circular effect that challenges how we think about the entire UI, which leads to solid convictions.\n- Even a dark mode can be worked on in small iterations. Over the course of this process, we have created dark versions for all Web IDE-specific UI elements, but also for dropdowns and modals, which are global elements. This not only makes it easier for us to think about the design, but also about how the code should be structured for a global dark mode.\n- We are clearly standing on the shoulders of giants. Designing and developing this dark mode at such a fast pace was only possible because we had many great in-depth resources about dark mode available to us. The two that stood out the most are [Apple's Human Interface Guidelines](https://developer.apple.com/design/human-interface-guidelines/ios/visual-design/dark-mode/) and the dark theme section from [Material Design](https://material.io/design/color/dark-theme.html).\n\n![Web IDE dark mode](https://about.gitlab.com/images/blogimages/ide-dark-loop.gif){: .center}\nWeb IDE dark mode\n{: .note.text-center}\n\n### Next steps\n\n- For the Web IDE as a feature, we're in the process of making our code more easily themable, so that other syntax highlighting themes can be extended more flexibly.\n- We're also planning to clean up the prototype we created, and either create a Web IDE UI Kit, or integrate it into our Pajamas design system, so that others can easily access, modify, and contribute to it.\n\nLastly, you can contribute too! 
We would especially love to see contributions to extend the other syntax highlighting themes to the rest of our Web IDE UI. If you have anything else in regards to the Web IDE you'd like us to consider, [create a new issue](https://gitlab.com/gitlab-org/gitlab/issues/new) and be sure to tag the GitLab UX Department (@gitlab-com/gitlab-ux). If you'd like to be part of our testing efforts at any level, sign up for our [GitLab First Look](/community/gitlab-first-look/) program. You can also [contribute](https://gitlab.com/gitlab-org/gitlab-design/-/blob/master/CONTRIBUTING-Figma.md) to the design of GitLab by starting with our [Pajamas UI Kit](https://www.figma.com/community/file/781156790581391771) in Figma.\n",[5240,1144,959,727],"webcast",{"slug":5242,"featured":6,"template":678},"creating-a-dark-ui-for-gitlabs-web-ide","content:en-us:blog:creating-a-dark-ui-for-gitlabs-web-ide.yml","Creating A Dark Ui For Gitlabs Web Ide","en-us/blog/creating-a-dark-ui-for-gitlabs-web-ide.yml","en-us/blog/creating-a-dark-ui-for-gitlabs-web-ide",{"_path":5248,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5249,"content":5254,"config":5259,"_id":5261,"_type":16,"title":5262,"_source":17,"_file":5263,"_stem":5264,"_extension":20},"/en-us/blog/how-we-release-software-patches",{"title":5250,"description":5251,"ogTitle":5250,"ogDescription":5251,"noIndex":6,"ogImage":4254,"ogUrl":5252,"ogSiteName":692,"ogType":693,"canonicalUrls":5252,"schema":5253},"Inside GitLab: How we release software patches","At GitLab, we tackle software patches in two ways – hands on and automatically. Learn how the release manager works to create and deliver essential fixes with auto-deploy releases on GitLab.com and patch releases for self-managed users.","https://about.gitlab.com/blog/how-we-release-software-patches","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside GitLab: How we release software patches\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-05-13\",\n      }",{"title":5250,"description":5251,"authors":5255,"heroImage":4254,"date":5256,"body":5257,"category":14,"tags":5258},[3676],"2020-05-13","\n\nYou can set your smartwatch by it: On the 22nd of every month, GitLab self-managed users can expect to see an update for the latest version of our self-managed product. In our monthly release, you might find new product features, iterations on existing features, and oftentimes you’ll see the end-result of requests for tooling or merge requests submitted by the community.\n\nBut just as in life, rarely is software development perfect. When a bug or security vulnerability surfaces, the release manager on the Delivery team will have to create a patch release for our self-managed customers. GitLab.com is continuously updated through the continuous delivery process. We call this CD process auto-deployments to avoid ambiguity with GitLab CD features. The auto-deploy process might include suggestions from merge requests submitted by users, customers, and our internal development team. So at GitLab, tackling the pesky problem of releasing software patches is solved in two very different ways.\n\n\"We are ensuring daily that everything built by developers is deployed on all environments prior to deploying to GitLab.com,\" explains [Marin Jankovski](/company/team/#marin), senior engineering manager, Infrastructure. 
\"You can think of a self-managed release as a snapshot of a GitLab.com deployment, with additional actions taken to ensure that our customers can use the same package for their own self-managed installations.\"\n\nRegardless of the origin of the bug or vulnerability, GitLab.com customers will receive the fix shortly after it has been created, which is a benefit of an automated CD process. The fixes for self-managed customers require specific preparation by the release manager.\n\nThe Delivery team works hard to automate more of the processes involved in creating a release to reduce the [mean time to production (MTTP)](/handbook/engineering/infrastructure/performance-indicators/#mean-time-to-production-mttp), which refers to the amount of time between when a developer merges a merge request and when it is deployed to GitLab.com.\n\n\"The whole mission of the Delivery Team is making sure that we can deliver faster as a company or at least enabling people to deliver faster, right?\" says Marin.\n\nBoth our self-managed and GitLab.com customers benefit from the Delivery team’s efforts to reduce cycle time and speed up deployments. In this blog post, we explain the similarities and differences between these two types of [GitLab releases](/handbook/engineering/releases/): how the Delivery team prepares a patch release for our self-managed GitLab users, and how they ensure that GitLab.com is always current using auto-deployments.\n\n## What does a release manager do?\n\nMembers of the GitLab Delivery team [rotate the responsibility of being release manager](/community/release-managers/) for our monthly self-managed releases, as well as the patch and security releases that might be shipped in-between. They are also responsible for efforts to migrate the company to automated, continuous deployments.\n\nOur self-managed releases and our GitLab.com releases use similar workflows and technology, but operate on different [timelines](/handbook/engineering/releases/#timelines), Marin explains.\n\nThe main priority for the release manager, regardless of the release type, is ensuring that GitLab stays available and secure, since the application runs on GitLab.com, and that the same issues do not trickle down to self-managed customers' infrastructure.\n\nWhen a bug or security vulnerability in GitLab is reported and fixed, it is up to the release manager to evaluate whether or not it merits a patch or security release for our self-managed users. If the release manager decides the bug or vulnerability merits an update, they will start the preparation work.\n\nThe release manager has to decide whether to prepare a patch release and when to deploy it, and that largely depends on the context of the situation: \"And for now, machines are not as good at dealing with the context as humans are,\" says Marin.\n\n## All about patch releases\n\n### What is a patch release and why do we need one?\n\nThe release manager decides whether or not to issue a patch release based on the [severity of the bug being reported](/handbook/engineering/quality/issue-triage/#sts=Severity). The bugs are ranked based upon their severity – an S4 or S3 bug may be stylistic, such as a pixel or icon that is off-kilter. It’s no less important, but it is less likely to impact someone’s workflow, and so it is unlikely that a patch release will be created just to fix an S4 or S3 vulnerability, Marin explains. 
An S1 or S2 vulnerability, by contrast, means a user may be prevented from upgrading to the newest version, or that there is a significant error impacting a user’s workflow. If an S1 or S2 bug is reported, that means a lot of people are likely experiencing it, so the release manager begins to prepare the patch release straightaway.\n\nOnce the fix is ready for an S1 or S2 vulnerability, the release manager will start the patch release. For example, the [GitLab 12.10.1 patch release](/releases/2020/04/24/gitlab-12-10-1-released/) was created after a few blocker issues were identified and developers fixed the underlying problem. The release manager assessed whether the assigned severities were correct and, after confirming they were, initiated the patch release process; the release shipped within 24 hours of the blockers being identified.\n\nWhen the queue of S4s, S3s, and S2s starts to grow, the release manager will look at the context to determine the urgency of the patch release. When the bugs start to pile up, the release manager will bundle the items together and ship them. A [patch or security release blog post](/releases/2020/04/24/gitlab-12-10-1-released/) summarizes the various fixes and updates that are pushed out to users in the form of patch or security releases.\n\n### How does the release manager create a patch release?\n\nWe use GitLab CI and various other GitLab features such as our ChatOps function to create GitLab patch releases. The release manager will start the patch release by triggering the ChatOps command in our internal `#releases` channel in Slack.\n\n`/chatops run release prepare 12.10.1`\n\nThe ChatOps function works within Slack to trigger various events that GitLab then picks up and executes. For example, the Delivery team set up ChatOps to automate a number of action items for the patch release, such as preparing the [relevant patch release issues](https://gitlab.com/gitlab-org/release/tasks/-/issues/1305), actionable items within the release, and so on.\n\nOnce the release manager triggers the ChatOps command using Slack, the rest of the process is automated within GitLab using our [CI/CD functions](/solutions/continuous-integration/). There is a lot of back-and-forth between ChatOps in Slack and GitLab throughout the release process as the release manager triggers some of the core steps in the process.\n\nWatch the video below for an in-depth look at the technical process behind preparing a patch release for GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/lHag9jARbIg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## Inside auto-deployments on GitLab.com\n\n### How do releases work on GitLab.com?\n\nThe process and tools used to update GitLab.com are similar to those used for creating a patch release. Updating GitLab.com requires fewer manual actions from the release manager.\n\nInstead of using ChatOps to trigger the deployment, we use CI features such as [scheduled pipelines](https://docs.gitlab.com/ee/ci/pipelines/schedules.html#working-with-scheduled-pipelines), which allow the release manager to schedule certain actions to happen at a particular time. 
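To illustrate the general pattern (this is a hedged sketch, not GitLab's internal release tooling), a job in `.gitlab-ci.yml` can be restricted so it only runs when a pipeline schedule fires:

```yaml
# Illustrative only: the hourly schedule itself is configured under the
# project's CI/CD > Schedules settings, not in this file.
check-new-changes:
  stage: build
  script:
    - echo "Check for new changes, then schedule packaging and deployment..."
  only:
    - schedules
```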
Instead of a manual process, a pipeline runs every hour: it checks for any new changes to GitLab projects, automatically pulls them in, schedules packaging and deployment, and runs QA testing and the other required steps.\n\n\"So you have a lot of deployments happening on all of the different environments, before GitLab.com. And then once all these environments are in a good state and testing shows good results, the release manager takes an action to promote a deployment on GitLab.com,\" says Marin.\n\nThe CI/CD technology that powers updates to GitLab.com automates the release process, up to the point where a release manager will have to manually trigger deployment to the production environment for GitLab.com.\n\nMarin takes a deep dive into the process behind creating an update to GitLab.com in the video below. Watch to learn more about the process behind issuing an auto-deploy release.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/_G-EWRpCAz4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## What’s next for the Delivery team\n\nThe main difference between auto-deploy releases on GitLab.com and patch releases for self-managed customers is that the latter process is longer and requires more manual action on the part of the release manager.\n\n\"Sometimes we are delayed with creating releases for our self-managed customers because of the handover issues, because of the tooling issues, because of the too many variables that go into producing a single release,\" says Marin.\n\nOne of the short-term [goals for the Delivery team](/handbook/engineering/infrastructure/team/delivery/#vision) is to reduce the amount of manual intervention required on the part of the release manager to [increase release velocity](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/170). The team is working to simplify, streamline, and automate the release process, which will help turn around lower-tier severity fixes faster. The focus on speed is indicated by the core key performance indicator: Reduce the MTTP – the time it takes for a merge request to deploy to GitLab.com – from its current 50 hours to eight hours.\n\nThe Delivery team is also working to drive the changes necessary to shift [GitLab.com to a Kubernetes-based infrastructure](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/112). These are two different approaches that share the same goal: Shipping faster on GitLab.com and for self-managed customers.\n\n## Have ideas for us?\n\nEveryone can contribute to GitLab, and we welcome feedback from our readers. 
If you have ideas for the Delivery team, feel empowered to [create an issue and attach the label `team: Delivery`](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=team%3A%3ADelivery).\n\nCover photo by [Kyle Hinkson](https://unsplash.com/@kajhinkson?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/@kajhinkson?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note.text-center}\n",[915],{"slug":5260,"featured":6,"template":678},"how-we-release-software-patches","content:en-us:blog:how-we-release-software-patches.yml","How We Release Software Patches","en-us/blog/how-we-release-software-patches.yml","en-us/blog/how-we-release-software-patches",{"_path":5266,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5267,"content":5273,"config":5278,"_id":5280,"_type":16,"title":5281,"_source":17,"_file":5282,"_stem":5283,"_extension":20},"/en-us/blog/directed-acyclic-graph",{"title":5268,"description":5269,"ogTitle":5268,"ogDescription":5269,"noIndex":6,"ogImage":5270,"ogUrl":5271,"ogSiteName":692,"ogType":693,"canonicalUrls":5271,"schema":5272},"Get faster and more flexible pipelines with a Directed Acyclic Graph","A Directed Acyclic Graph will let you run pipeline steps out of order, break the stage sequencing, and allow jobs to relate to each other directly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681298/Blog/Hero%20Images/james-eades-bfwhP9xodvE-unsplash.jpg","https://about.gitlab.com/blog/directed-acyclic-graph","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get faster and more flexible pipelines with a Directed Acyclic Graph\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2020-05-12\",\n      }",{"title":5268,"description":5269,"authors":5274,"heroImage":5270,"date":5275,"body":5276,"category":14,"tags":5277},[1019],"2020-05-12","\n\nRecently, GitLab released an exciting feature that reduces pipeline running times and enables more flexibility in the order jobs run. The feature, Directed Acyclic Graph (DAG), is free and available on GitLab.com and the self-managed versions. \n\n### Pipeline Jobs and Stages \n\nIn a typical [CI/CD pipeline](/topics/ci-cd/), you have multiple stages, which represent an automation of the [DevOps process](/topics/devops/), such as build, test, package, config, and deploy. Each stage is made up of one or more jobs. In the [CI/CD configuration file, .gitlab-ci.yml](https://docs.gitlab.com/ee/ci/quick_start/#what-is-gitlab-ciyml), you define the order of your stages. Usually, the pipeline will start with the build jobs; after all build jobs have completed, test jobs will start, then jobs from the next stage will run, and so on. \n\nWhile this order makes a lot of sense, in some cases this might slow down the overall execution time. Imagine the build stage consists of task A, which completes in 1 minute, and task B, which is very slow (say, 5 minutes). Task C is in the test stage, but it depends on task A only. Still, task C must wait 5 minutes before it can be executed, resulting in a waste of 4 minutes.  
\n\n![stage sequencing ](https://about.gitlab.com/images/blogimages/DAG/pipeline_diagram.png){: .shadow.medium.center}\n\n### Meet Directed Acyclic Graph\n\nDAG will allow you to run pipeline steps out of order, breaking the stage sequencing and allowing jobs to relate to each other directly no matter which stage they belong to. \n\nWith DAG, jobs can start to run immediately after the jobs they depend on have completed, even if some jobs in the previous stage are still running. This new feature speeds up the CI/CD process and helps complete the deployment sooner.  \n\nIn the example below, a project generates Android, iOS, and web apps in a multi-stage pipeline. The iOS tests started as soon as the iOS build passed rather than waiting for all the Android and web builds to pass too. It was the same for the iOS deployment – it completed after the iOS tests passed without waiting for the other tests to complete. The total compute time might be the same, but the wall-clock time is different. In more complicated cases, it's possible to significantly reduce the overall wall-clock time of the pipeline by declaring exactly which jobs depend on which other jobs.\n\n![Build, test and deploy stages](https://about.gitlab.com/images/blogimages/DAG-blog.png){: .shadow.medium.center}\n\n### Defining dependent jobs\n\nThe .gitlab-ci.yml file introduces a new keyword, [needs](https://docs.gitlab.com/ee/ci/yaml/#needs), which takes an array of the jobs that a job depends on. \n\n```yaml\nios:\n  stage: build\n  script:\n    - echo \"build ios...\"\n\nios_test:\n  stage: test\n  script:\n    - echo \"test something...\"\n  needs: [\"ios\"]\n```\n\nThe `ios_test` job, which is part of the `test` stage, will start immediately after the `ios` job, which is in the `build` stage, finishes – regardless of the status of other jobs in the `build` stage. \n\n### Where is it useful? \n\nThis can be valuable for the increasingly popular [monorepo](https://en.wikipedia.org/wiki/Monorepo) pattern, where you have different folders in your repo that can build, test, and maybe even deploy independently, just like in the above example where the iOS, Android, and web apps can be built, tested, and deployed individually. \n\nAnother use case is when your pipeline contains some heavy tests that take a lot of time to execute. It would make more sense to start those tests as soon as possible, rather than waiting for unrelated tasks to complete first. 
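To make the monorepo scenario concrete, here is a hedged sketch (job names and scripts are illustrative) where each app's test and deploy jobs chain off their own build only:

```yaml
stages: [build, test, deploy]

build_ios:
  stage: build
  script: echo "build ios..."

build_web:
  stage: build
  script: echo "build web..."   # slower job; the iOS jobs won't wait for it

test_ios:
  stage: test
  needs: ["build_ios"]          # starts as soon as build_ios finishes
  script: echo "test ios..."

deploy_ios:
  stage: deploy
  needs: ["test_ios"]           # deploys without waiting for the web jobs
  script: echo "deploy ios..."
```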
\n\n### You can also watch a demo of DAG  below:\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/9EHcQd3x_Sw\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\nCover image by [James Eades](https://unsplash.com/photos/bfwhP9xodvE) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[110,1646],{"slug":5279,"featured":6,"template":678},"directed-acyclic-graph","content:en-us:blog:directed-acyclic-graph.yml","Directed Acyclic Graph","en-us/blog/directed-acyclic-graph.yml","en-us/blog/directed-acyclic-graph",{"_path":5285,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5286,"content":5292,"config":5297,"_id":5299,"_type":16,"title":5300,"_source":17,"_file":5301,"_stem":5302,"_extension":20},"/en-us/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate",{"title":5287,"description":5288,"ogTitle":5287,"ogDescription":5288,"noIndex":6,"ogImage":5289,"ogUrl":5290,"ogSiteName":692,"ogType":693,"canonicalUrls":5290,"schema":5291},"How autoscaling GitLab CI works on AWS Fargate","Run your CI jobs as AWS Fargate tasks with GitLab Runner and the Fargate Driver","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681285/Blog/Hero%20Images/runner-autoscale-fargate-blog-cover.jpg","https://about.gitlab.com/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How autoscaling GitLab CI works on AWS Fargate\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2020-05-11\",\n      }",{"title":5287,"description":5288,"authors":5293,"heroImage":5289,"date":5294,"body":5295,"category":14,"tags":5296},[1544],"2020-05-11","\n\nAutoscaling GitLab Runner is a unique value proposition for teams that run their self-managed build agents on cloud-hosted virtual machines. As the number of [CI/CD jobs](/topics/ci-cd/) run over a specific period can fluctuate, teams must have build agent auto-scaling solutions in place that are easy to set up, configure, and cost-efficient.  \n\nGitLab Runner [autoscaling](https://docs.gitlab.com/runner/configuration/autoscale.html) responds to demand by provisioning new cloud-hosted virtual machines with Docker and GitLab Runner. When demand is lower, any additional virtual machines above the configured minimum size are de-provisioned. However, while this model of automatically provisioning and terminating virtual machine instances continues to be useful for a vast plethora of use cases, customers also want to take advantage of the capabilities of cloud container orchestration solutions for executing GitLab CI/CD jobs. For some, adopting GitLab's Kubernetes integration for AWS Elastic Kubernetes Service and Google Kubernetes Engine has allowed them to take advantage of the benefits of containerized pipelines. For others, AWS Fargate has proven to be a compelling container orchestration solution, as it simplifies the process of launching and managing containers on AWS services ECS and EKS.\n\nWe are pleased to announce that as of the [12.10](/releases/2020/04/22/gitlab-12-10-released/) release, you can now auto-scale GitLab CI jobs on AWS Fargate managed containers.\n\n![](https://about.gitlab.com/images/blogimages/autoscaling-runners-ci-ecs-fargate.png)\n\n## So how does it work? 
\n\nIn GitLab 12.1, we released the GitLab Runner [Custom executor](https://docs.gitlab.com/runner/executors/custom.html). With the custom executor, you can create drivers for GitLab Runner to execute a job on technology or a platform that is not supported natively. To enable executing GitLab CI jobs on AWS Fargate, we developed a [GitLab AWS Fargate driver](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate) for the Custom executor.  This driver uses the AWS Fargate `run-task` action to schedule a new task. A task in ECS is an instance of a task definition that runs the container or containers defined within the task definition. In this containerized solution for CI builds, the pipeline job executes on a container built from an image that must include the tools that you need to build your application.\n\nThe AWS Fargate Driver works in conjunction with GitLab Runner, a lightweight executable that executes pipeline jobs. Similar to the GitLab Runner executable, a `config.toml` file is the file used to pass configuration parameters to the driver. The AWS Fargate driver divides the CI job into the following stages.\n\n1. Config\n1. Prepare\n1. Run\n1. Cleanup\n\n## SSH connectivity\n\nFor the Fargate Driver to execute build commands in the container that is running as a task on ECS, the driver needs to be able to SSH into the container. So we have built additional capabilities into the driver to allow for a SSH connection between the GitLab Runner + AWS Fargate driver and the CI build container. \n\n![Fargate Driver SSH Connectivity](https://about.gitlab.com/images/blogimages/runner_fargate_driver_ssh.png)\n\n## Limitations\n\nAWS Fargate does not support running containers in privileged mode. For example, Docker-in-Docker (DinD), which enables the building and running of container images inside of containers, does not work on Fargate. In keeping with one of GitLab's core values, iteration, we will continue to iterate on solutions for this problem. So stay tuned for future enhancements.\n\n## Getting Started\n\nTo get started, review our detailed [configuration and setup guide.](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/index.html)\n\nWith the release of the GitLab Runner AWS Fargate driver, we provide the most diverse set of options in the industry for executing CI pipeline jobs in an autoscaling configuration. These options now include cloud-delivered virtual machines, AWS EC2, Google GCP, Azure Compute, and container orchestration platforms: AWS EKS, AWS ECS + Fargate, and Google Kubernetes. 
Our long-term goal is to provide the best and most comprehensive solution for executing CI jobs at scale on the major cloud platforms.\n\nCover image by [Alessio Lin](https://unsplash.com/@lin_alessio) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[110,749,704],{"slug":5298,"featured":6,"template":678},"introducing-autoscaling-gitlab-runners-on-aws-fargate","content:en-us:blog:introducing-autoscaling-gitlab-runners-on-aws-fargate.yml","Introducing Autoscaling Gitlab Runners On Aws Fargate","en-us/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate.yml","en-us/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate",{"_path":5304,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5305,"content":5311,"config":5316,"_id":5318,"_type":16,"title":5319,"_source":17,"_file":5320,"_stem":5321,"_extension":20},"/en-us/blog/observations-on-how-to-iterate-faster",{"title":5306,"description":5307,"ogTitle":5306,"ogDescription":5307,"noIndex":6,"ogImage":5308,"ogUrl":5309,"ogSiteName":692,"ogType":693,"canonicalUrls":5309,"schema":5310},"Why iteration helps increase the merge request rate","How the Monitor:Health team has been able to increase the merge request rate using better iteration, a bias for action, and by writing things down.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666603/Blog/Hero%20Images/book.jpg","https://about.gitlab.com/blog/observations-on-how-to-iterate-faster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why iteration helps increase the merge request rate\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2020-05-06\",\n      }",{"title":5306,"description":5307,"authors":5312,"heroImage":5308,"date":5313,"body":5314,"category":14,"tags":5315},[1441],"2020-05-06","\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-05-21.\n{: .alert .alert-info .note}\n\nDo you know much about fighter jets? It's okay if you don't; neither did I until I became a software developer. While it seems like a rather strange set of things to see a correlation with, they are intrinsically related through a man named [John Boyd](https://en.wikipedia.org/wiki/John_Boyd_(military_strategist)), who was a military strategist and a fighter pilot.\n\nBoyd was rather famous in the Air Force for a law he coined, which we're going to use to demonstrate the difference between iterative and recursive approaches to software development, why we favor it in the [Monitor:Health team](/handbook/engineering/development/ops/monitor/respond/), and why you might want to favor it too.\n\n_Boyd's Law of Iteration states that **speed** of iteration beats quality of iteration_\n\nThis law was developed by Boyd while observing dogfights between MiG-15s and F-86s. Even though the MiG-15 was considered a superior aircraft by aircraft designers, the F-86 was favored by pilots. The reason it was favored was simple: in one-on-one dogfights with MiG-15s, the F-86 won nine times out of ten.\n\nWhat's happening here? If the MiG is the better aircraft, why would the F-86 win the majority of the fights? Well, Boyd, who was one of the best dogfighters in history, suggested:\n\n> That the primary determinant of winning dogfights was observing, orienting, planning, and acting **faster**, not better.\n\nThis leads to Boyd's Law of Iteration: Speed of iteration beats quality of iteration. 
What's pretty incredible is that you will find this same pattern throughout every section of modern software development:\n\n- Writing unit tests? Keep them small and lean so they can be run faster.\n- Writing usability tests? They work best when they're lean and you can quickly discard what's not working.\n- Writing a function, class, or feature? Start with the smallest, [most boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions) and iterate.\n- Taking an Agile approach? The quicker, the better, you'll often find.\n- Software in general is about failing early and often.\n\nSo let's pretend I've convinced you with some obscure fighter jet references, and now you're ready to break down those merge requests and iterate quicker than you've ever iterated. Awesome! Let's talk about how to foster a team environment that allows for iteration, because that's the key here at GitLab. When you get started on this pilgrimage to [11 amazing merge requests per month as a goal](/handbook/engineering/development/performance-indicators/#mr-rate), you need to keep one very important thing in mind:\n\nIt's a team effort. While you as an individual developer will do an amazing job by honing this skill, the real difference is made when you look at iteration as a tool to lift the team up. Think of yourself as the pilot who wants to get that faster iteration in to cover your buddies.\n\n## Bias for action\n\nWhen I got started at GitLab, I was introduced to the idea of really believing in iteration as a methodology because it's a [company value](https://handbook.gitlab.com/handbook/values/#iteration).\n\n> Decisions should be thoughtful, but delivering fast results requires the fearless acceptance of occasionally making mistakes.\n\nThis was highlighted in various ways by different people across the company, but something that really stuck out to me was hearing another team member refer to the Monitor:Health team as a \"team with a strong bias for action\". We don't really believe in being reactive; instead, we want to always be proactively improving the product. This underlying belief system trickles down from our team leader into every discussion, decision, deliverable set, and ultimately, how we as developers see our own agency operating. We **believe** in action, that an open merge request (even if it's not perfect) is always better than nothing.\n\nAs we mentioned, we have a bias for action. So, when our team anticipates a problem, we create a merge request first, before starting a discussion. I know for a lot of people this might seem a bit counterproductive – what if this is a wasted effort? In reality, [starting with a merge request](/handbook/communication/#start-with-a-merge-request) is the best possible place for any real discussion. It helps create a living log for the conversation, and creates more visibility for the problem we are fixing.\n\n## All code is bad code: Impostor syndrome, course correction, and accepting failure\n\nI had a mentor at my old company who was a fantastic programmer, and many of the people on my team looked up to him. One Friday afternoon, he gave a presentation that really shaped my understanding of iteration. 
This talk, \"All code is bad code,\" became rather famous in our small team because he mostly spoke about why the majority of the code he had written himself was ultimately bad code, and how the desire to **appear** smart is the number one barrier for people to become great software developers.\n\n> What you make with your code is how you express yourself, not the code itself - Eric Elliott\n\nProgramming is by its very nature difficult. As humans, we're not particularly well-suited for deep and abstract logical thinking – our brains simply don't work like that by default, and it's a learned skill for the most part. Being reminded of this is a humbling but freeing experience, as it helps you move forward without fear. Every merge request you submit should be high quality, but your definition of high quality should shift to mean delivering something useful to an end user.\n\nAt GitLab, we accept our limitations in that we might not know everything about the problem we're trying to solve. Instead, we lean heavily into the idea of the smallest, most [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions) that can be expanded upon quickly by collaborating with our team.\n\n> Our bias for action also allows us to course correct quickly.\n\nWe always accept there will be [uncertainty](https://handbook.gitlab.com/handbook/values/#accepting-uncertainty) in what we do as software developers, but we don't let that stop us from trying to deliver an amazing product to our users.\n\nWhen we create a merge request, we do so with a [low sense of shame](https://handbook.gitlab.com/handbook/values/#low-level-of-shame) and [no ego](https://handbook.gitlab.com/handbook/values/#no-ego). This approach allows us to deliver fearlessly **even if we're wrong**.\n\nAs a team, this is the environment you want to foster because it helps create a wonderfully positive feedback loop: Low sense of shame > many merge requests submitted > more discussion > many iterations > ideally, the best possible collaborative results for the end user.\n\nThe core takeaway for team leaders is that **it's okay to make mistakes**. The best thing you can do as a team leader is to foster a safe place for developers to make mistakes and learn as they go.\n\nIf you're a developer, remember that **it's okay to make mistakes as long as you strive for course correction**.\n\n## Foster a healthy sense of urgency for writing things down\n\n> \"While you're thinking about doing it... just do it.\"\n\nOne of the things we do especially well at GitLab is writing things down. Documenting as we go is how we help our team pick up and go without needing to waste time on unnecessary communication.\n\nIt's safe to say that with our GitLab handbook being at [2,500,000 words](/handbook/about/#count-handbook-pages) and counting, the folks here take writing things down pretty seriously.\n\nAt GitLab, we believe this is also the path to a higher merge request rate.\n\nOn the Monitor:Health team, and throughout GitLab, we believe in preserving our energy, capturing valuable conversations, and making them public to dispense this knowledge widely. As a new team member, I've seen this in action multiple times now. Over the course of my eight weeks at GitLab, I can count on one hand the number of times I've had to ping a team member with a question I could not find an answer to in our documentation. 
The discipline of keeping these notes really keeps the focus on delivering results, since we don't have an excess of energy spent going back and forth with questions.\n\nIn my first four weeks at GitLab, almost every single question I needed an answer to was already covered in documentation someone else had gone to the trouble of creating. Here is a list of some of my initial questions and links to the answers in GitLab documentation.\n\n- [How do I set up the local GitLab Development Kit?](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/doc/howto/auto_devops/tips_and_troubleshooting.md)\n- [How do I set up the GitLab Development Kit with Prometheus?](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/master/doc/howto/prometheus.md)\n- [How do I use embedded charts via Prometheus and Grafana?](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#embedding-gitlab-managed-kubernetes-metrics)\n- [How do I use the `@gitlab/ui` components?](https://gitlab.com/gitlab-org/gitlab-ui/-/blob/master/README.md)\n- [How do I handle styling in external projects?](https://gitlab.com/gitlab-org/gitlab-ui/-/blob/master/doc/css.md)\n- [How should components look and act on pages I am developing?](https://design.gitlab.com/)\n\nIf you can encourage your team to document solutions as problems arise, it can help developers deliver more.\n\n> Documentation is a love letter that you write to your future self. - Damian Conway\n\n## Tighten those feedback loops\n\n> Keep what works, disregard what doesn't.\n\nYou'll often notice that the feedback loop for tight-knit teams just gets tighter over time. People start to see patterns of what does and doesn't work as they work together. A good team should aim to address these patterns by keeping the ones that work and refining them, but also by not being afraid to disregard the ones that don't work.\n\nRecently, the Monitor:Health team [delivered the first iteration of an incident management tool called the Status Page](https://about.gitlab.com/blog/how-we-built-status-page-mvc/). The team did an amazing job on the [Status Page](https://gitlab.com/gitlab-org/status-page), with each team member really aiming to break problems into their smallest pieces and iterate quickly, which kept the overall merge request rate high for this project.\n\nThe post-mortem of the development process is what made the biggest difference. We came together as a team to discuss which aspects worked well and which didn't, with the end goal of tightening our feedback loops so people can really work autonomously and asynchronously. It takes a lot of bravery to have a critical discussion publicly about what didn't work, and not just focus on all the things you have done well.\n\nHow does this play out? Well, for us on the Monitor:Health team, it means getting better at refining issues to ensure that when they receive a `ready for development` label they are **truly** ready for anyone to pick up at any time and take it all the way to done. 
This really helps increase the overall merge request rate because developers don't need to sit through one to three feedback loops waiting for their questions to be answered, when they could be getting it done.\n\nFor an issue to have a [`ready for development` label](/handbook/product-development-flow/#build-phase-2-develop--test), it needs:\n\n- A clear definition of \"done\"\n- All necessary conversations resolved inside the issue\n- A clear set of expectations defined by the developer\n- A note on whether tests are required\n- A note on whether UX is needed\n\nWe are trying to enable **any** developer on the Monitor:Health team to read an issue with zero preexisting context and deliver a merge request related to the issue without needing to leave that issue. Remember, we're trying to [measure results not hours](https://handbook.gitlab.com/handbook/values/#measure-results-not-hours). The less time someone spends asking questions, the more time they can spend delivering results.\n\n> Hail to the issue, baby! - Duke Nukem if he was a software developer at GitLab\n\n## It's all about the team\n\nThe only reason we are able to create this level of velocity inside GitLab is the belief that we can and **should** iterate quickly. The support of the team across the main points of how to iterate (bias for action, a low sense of shame, a healthy sense of urgency, and tight feedback loops) is the bedrock that allows us to deliver results for customers via a better product.\n\nWell, that's all, folks! I hope you enjoyed the read and learned something along the way. If you have any questions or want to suggest an improvement, drop me an email at: `doregan@gitlab.com`.\n\nWhen in doubt, iterate faster.\n\n## TL;DR, show me the proof\n\n![Results](https://about.gitlab.com/images/blogimages/iterate-faster/results.png){: .center}\n\nThe Monitor:Health frontend team has grown over time while increasing average merge request rate. 
The team's merge request rate reflects the current team size of four people.\n\n## Learn more\n\n- [GitLab Values](https://handbook.gitlab.com/handbook/values/)\n- [Boyd's Law](https://blog.codinghorror.com/boyds-law-of-iteration/)\n- [All code is bad](https://www.stilldrinking.org/programming-sucks)\n- [Accepting failure](https://www.youtube.com/watch?v=UxvXgmZf6NU)\n\n[We're hiring](/jobs/) at GitLab, or consider [trying us out](/free-trial/) for free.\n\nCover image by [Aaron Burden](https://unsplash.com/photos/G6G93jtU1vE) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[915,894],{"slug":5317,"featured":6,"template":678},"observations-on-how-to-iterate-faster","content:en-us:blog:observations-on-how-to-iterate-faster.yml","Observations On How To Iterate Faster","en-us/blog/observations-on-how-to-iterate-faster.yml","en-us/blog/observations-on-how-to-iterate-faster",{"_path":5323,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5324,"content":5330,"config":5336,"_id":5338,"_type":16,"title":5339,"_source":17,"_file":5340,"_stem":5341,"_extension":20},"/en-us/blog/deploying-application-eks",{"title":5325,"description":5326,"ogTitle":5325,"ogDescription":5326,"noIndex":6,"ogImage":5327,"ogUrl":5328,"ogSiteName":692,"ogType":693,"canonicalUrls":5328,"schema":5329},"Deploying apps to GitLab-managed Amazon EKS with Auto DevOps","A Kubernetes tutorial: Use GitLab AutoDevOps to deploy your applications to Amazon EKS.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666959/Blog/Hero%20Images/gitlab-aws-cover.png","https://about.gitlab.com/blog/deploying-application-eks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy your application to a GitLab-managed Amazon EKS cluster with Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2020-05-05\",\n      }",{"title":5331,"description":5326,"authors":5332,"heroImage":5327,"date":5333,"body":5334,"category":14,"tags":5335},"How to deploy your application to a GitLab-managed Amazon EKS cluster with Auto DevOps",[1161],"2020-05-05","\n\nDeploying an application onto Amazon EKS doesn't have to be painful. In fact, GitLab's [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) function makes it easy for developers to deploy applications from GitLab onto any cloud. In this tutorial, I break down how to deploy a simple Ruby Hello World application onto our GitLab-managed Amazon EKS cluster, which we created earlier ([read part one to learn how](/blog/gitlab-eks-integration-how-to/)). For the tutorial, I integrated GitLab with Amazon EKS in a GitLab group I created purposely for this, so all the projects created in the group can use the integration without any extra configuration. \n\nIn the previous blog post, we saw how seamless it is to create a Kubernetes cluster on Amazon EKS in GitLab with the right permissions. Developer productivity is greatly improved because there is no more need to manually set up clusters, and the same cluster can be used for multiple projects when Amazon EKS is integrated with GitLab at the group and instance levels, thus making onboarding new projects a breeze.\n\nIn this tutorial, we will be deploying a simple Ruby Hello World application to our GitLab-managed Amazon EKS cluster. 
For the purpose of this tutorial, I have integrated GitLab with Amazon EKS at the group level on a group I own on GitLab.com; this way, all projects created in the group can make use of the integration with no extra configuration.\n\n## A few things to note about Auto DevOps\n\nAuto DevOps provides pre-defined [CI/CD configuration](/topics/ci-cd/) which allows you to automatically detect, build, test, deploy, and monitor your applications. All you need to do is push your code and GitLab does the rest, saving you a lot of effort to set up the workflow and processes required to build, deploy, and monitor your project.\n\nYou'll need to execute the following steps for GitLab Auto DevOps to work seamlessly:\n\n* A [base domain](https://docs.gitlab.com/ee/user/project/clusters/#base-domain) name needs to be provided on GitLab’s integration page for Amazon EKS.\n\n ![AutoDevOps Base Domain](https://about.gitlab.com/images/blogimages/deploying-application-eks/base-domain.png){: .shadow.medium.center}\n Setting the base domain for Auto DevOps\n{: .note.text-center}\n\n* GitLab creates subdomains for every project that is deployed, using the project slug, the project ID, and the base domain name. For example, the link `https://abubakar-te-demos-minimal-ruby-app-2.eksdemo-project.gitlabtechevangelism.net/` is automatically created, where `abubakar-te-demos-minimal-ruby-app` is the project slug and `2` is the project ID, both prepended to the base domain name, `eksdemo-project.gitlabtechevangelism.net`.\n\n* Create a wildcard A-record for the base domain and point it to the Ingress endpoint created during the integration in the public-hosted zone of your domain name on Route53. Selecting the ALIAS option in Route 53 will present a list of resources you have already created. You will see your Ingress endpoint in the list of elastic load balancers. Alternatively, you can copy and paste from GitLab’s integration page.\n\n ![Route53 Alias for base Domain](https://about.gitlab.com/images/blogimages/deploying-application-eks/route53.png){: .shadow.small.center}\n Setting up an alias for the base domain using the generated Ingress endpoint.\n{: .note.text-center}\n\n* Install the pre-defined Kubernetes certificate management controller, cert-manager, on the GitLab-EKS integration to ensure every URL created for your application has a Let’s Encrypt certificate.\n\n## Now, let's deploy our application\n\n### How to set up the project\n\nIt takes five simple steps to set up the project for your application.\n\nFirst, create a GitLab project from an existing sample, in this case, GitLab’s Auto DevOps example called Minimal Ruby App. There is nothing special about this application; it's just a Ruby application you can use to try out the integration. If you integrated Amazon EKS at the group level on GitLab, you can just go ahead and create the project in the group. At the project level, you will have to perform the integration after creating the project.\n\nSecond, copy the URL from the “Clone with HTTPS” field of the sample project, Minimal Ruby App:\n\n  ![Cloning over HTTPS](https://about.gitlab.com/images/blogimages/deploying-application-eks/https-clone.png){: .shadow.small.center}\n  Cloning the sample project.\n{: .note.text-center}\n\nThird, click the \"import project\" tab on the new project page, then click on the \"repo by URL\" button. 
Paste the URL you copied earlier in the text box for \"Git repository URL\" and click on \"create project\".\n\n  ![Importing Project](https://about.gitlab.com/images/blogimages/deploying-application-eks/import-project.png){: .shadow.medium.center}\n  The progress of the sample project import.\n  {: .note.text-center}\n\nFourth, the project will be imported and all the files from the sample will be available in your new project.\n\n  ![Project import progress](https://about.gitlab.com/images/blogimages/deploying-application-eks/import-progress.png){: .shadow.medium.center}\n  The project import is completed.\n  {: .note.text-center}\n\nFinally, go to project settings > CI/CD > Auto DevOps and enable “Default to Auto DevOps pipeline”.\n\n  ![Project Settings](https://about.gitlab.com/images/blogimages/deploying-application-eks/project-settings.png){: .shadow.medium.center}\n  Enable the Auto DevOps pipeline.\n  {: .note.text-center}\n\n### How to deploy your application\n\n* Now a pipeline is created and the project is built, tested, and deployed to production using the [default Auto DevOps CI template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml).\n\n  ![Project Pipeline](https://about.gitlab.com/images/blogimages/deploying-application-eks/pipeline.png)\n  The first Auto DevOps pipeline.\n  {: .note.text-center}\n\n* Look inside the pipeline output to see the \"deployment to production\" line. This is where you'll find the URL to access your application.\n\n  ![Deployment to production](https://about.gitlab.com/images/blogimages/deploying-application-eks/production-deploy.png)\n  The link to the deployed application.\n  {: .note.text-center}\n\n* In the image above, you can see the application has been deployed and can be accessed at `https://abubakar-te-demos-minimal-ruby-app-1.eksdemo-project.gitlabtechevangelism.net/`.\n\nAnd it should show a “Hello World” message:\n\n  ![Deployed Application](https://about.gitlab.com/images/blogimages/deploying-application-eks/hello-world.png){: .shadow.medium.center}\n  The deployed application with the \"Hello World\" message.\n  {: .note.text-center}\n\n## How to make changes to the deployed application\n\nIf any new changes are pushed, a different set of jobs is run to build, test, and review the changes before they can be merged to the master branch. 
I changed the \"Hello World\" text from the previous deployment to HTML-formatted text in a new Git branch called `amazon-eks-html` using the GitLab Web IDE, and committed the changes.\n\n  ![Make changes to application](https://about.gitlab.com/images/blogimages/deploying-application-eks/new-commit.png)\n  Making new changes to the application.\n  {: .note.text-center}\n\nWhile committing the changes, I selected \"start a new merge request (MR),\" which took me to the MR page where I added more information about the changes in a new MR.\n\n  ![New Merge request](https://about.gitlab.com/images/blogimages/deploying-application-eks/new-mr.png)\n  The MR to deploy the new application.\n  {: .note.text-center}\n\nIn the image above, you can see a pipeline is created to build, test, and deploy using [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) to allow you to review the changes before deploying to production.\n\n  ![New MR pipeline test](https://about.gitlab.com/images/blogimages/deploying-application-eks/new-mr-test.png)\n  MR with Review Apps\n  {: .note.text-center}\n\nAs part of the review, the application is deployed to a dedicated namespace in the Amazon EKS cluster so you can check the changes before deploying to production. A URL for the [Review App](https://docs.gitlab.com/ee/ci/review_apps/) is provided, as shown in the image below.\n\n  ![Review Applications](https://about.gitlab.com/images/blogimages/deploying-application-eks/review-apps.png){: .shadow.medium.center}\n  The application in the Review App.\n  {: .note.text-center}\n\nThe `stop_review` job cleans up the Review App once the review is done. If MR approvals are required, the MR must be approved before being merged into the master branch. Once merged to master, the project is built, tested, and deployed to production.\n\n  ![Merged Change MR](https://about.gitlab.com/images/blogimages/deploying-application-eks/merged-mr.png){: .shadow.medium.center}\n  Deploying changes to production.\n  {: .note.text-center}\n\nThe image above shows that a second pipeline ran after the MR was merged. Once completed, a button is provided to `view app` and also to see memory consumption as the app runs. The `view app` button will open the application on the project's subdomain.\n\n  ![Updated application](https://about.gitlab.com/images/blogimages/deploying-application-eks/updated-site.png)\n  Changes deployed to production.\n  {: .note.text-center}\n\n## Deploy to Amazon EKS with Auto DevOps\n\nThe Auto DevOps function at GitLab makes deploying an application to the Amazon EKS cluster quite simple. Really, all you need to do is push code, and Auto DevOps automatically detects the programming language and uses the necessary [buildpack](https://buildpacks.io/) to test, build, and deploy your application.
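\n\nAuto DevOps is itself a pre-defined CI/CD template, so if you prefer configuration in code over the project setting we enabled earlier, a project can opt in from its own `.gitlab-ci.yml`. A minimal sketch might look like this (the `POSTGRES_ENABLED` override is just one illustrative Auto DevOps option, not something this tutorial requires):\n\n```yaml\n# A minimal sketch, not a full configuration: include the pre-defined\n# Auto DevOps template and override a single illustrative variable.\ninclude:\n  - template: Auto-DevOps.gitlab-ci.yml\n\nvariables:\n  POSTGRES_ENABLED: \"false\"  # example override: skip the bundled PostgreSQL\n```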
\n\nGitLab also takes making changes to your application a step further with Review Apps, which deploy your app to a temporary environment for you to review before deploying to production.\n\nIf you have questions about how to integrate GitLab with Amazon EKS to create a Kubernetes cluster, revisit the [first blog post](/blog/gitlab-eks-integration-how-to/).\n",[1002,749,2932,726],{"slug":5337,"featured":6,"template":678},"deploying-application-eks","content:en-us:blog:deploying-application-eks.yml","Deploying Application Eks","en-us/blog/deploying-application-eks.yml","en-us/blog/deploying-application-eks",{"_path":5343,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5344,"content":5350,"config":5356,"_id":5358,"_type":16,"title":5359,"_source":17,"_file":5360,"_stem":5361,"_extension":20},"/en-us/blog/how-we-built-status-page-mvc",{"title":5345,"description":5346,"ogTitle":5345,"ogDescription":5346,"noIndex":6,"ogImage":5347,"ogUrl":5348,"ogSiteName":692,"ogType":693,"canonicalUrls":5348,"schema":5349},"How we built Status Page","Get the scoop on the process behind engineering and troubleshooting the implementation of the Status Page","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681270/Blog/Hero%20Images/red-green-chilli.jpg","https://about.gitlab.com/blog/how-we-built-status-page-mvc","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we built Status Page\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Olena Horal-Koretska\"}],\n        \"datePublished\": \"2020-04-29\",\n      }",{"title":5345,"description":5346,"authors":5351,"heroImage":5347,"date":5353,"body":5354,"category":14,"tags":5355},[5352],"Olena Horal-Koretska","2020-04-29","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-05-08.\n{: .alert .alert-info .note}\n\nThe [Status Page](https://docs.gitlab.com/ee/operations/incident_management/status_page.html) is a new tool for communicating incident status and maintenance times, and is available to [GitLab Ultimate users](/pricing/ultimate/) (though the frontend is available to anyone). We are building the Status Page at GitLab to provide the best incident management experience both for our internal team and our customers.\n\n### Current Status update approach\n\nIncident handling in GitLab happens inside the issue in a dedicated public project. The team discusses and posts updates in the issue. Public updates are manually published by the engineer-on-call to [status.gitlab.com](https://status.gitlab.com/) every 15 minutes. But this setup is not ideal - responders lose precious time during a firefight by switching tools and duplicating information. Also, having a public project for incident management means:\n\n1. Massive load on your instance in the \"hard times\"\n2. Higher monetary cost\n3. No access to status updates if your GitLab instance is down\n4. Sensitive information that comes up in a discussion is public and may allow a vulnerability to be exploited while it is being fixed\n\n### Requirements\n\nOur first customer was the GitLab team. We [dogfood everything](/handbook/engineering/development/principles/#dogfooding), and the Status Page was no exception. So requirements were built based on the needs of our internal team:\n\n1. 
**No tool switching for incident updates:** People who handle incidents have enough responsibility fixing the incident itself, so we should spare them the countless pings about what happened, the status of the incident, and how it is progressing. Granted, some users want to receive immediate updates on the incident, so incident status should be updated in one place for both peer problem-solvers and the public.\n\n1. **Ability to control the level of visibility, determining which updates are published and which are not:** When you have a problem in your product, you do not necessarily want to shout it out: \"Hey, you malicious hacker, we've got a problem - go exploit it.\" Instead, you want your team to address the vulnerability calmly and in a timely manner. Balancing the need to send reassuring messages to the public without distracting the firefighting team can be achieved when you have control over the degree of visibility for the incident.\n\n1. **Display all types of data from the GitLab incident description and comments on the Status Page.** As incidents are handled in GitLab issues, there are a few options for how the data is represented to communicate the problem and/or solution, including images, embedded charts, etc. This rich data must be available in public updates.\n\n## Building the Status Page\n\nWe updated the design of the Status Page to address all of the concerns described in the previous section. Before we started building the Status Page, we led a [Spike exercise](/handbook/engineering/development/ops/monitor/#spike) because we weren't entirely sure which approach to take for implementation.\n\nOur initial plan was to leverage one of the many open-source solutions for implementing the Status Page, but none of them could really satisfy all of our requirements. So instead we decided to go ahead and build our own implementation.\n\n#### Backend and data scraping\n\nWhen we started, we first brainstormed all the different solutions we could use to collect data from incident issues to be automatically published to the Status Page:\n\n**Option 1: (GitLab) Webhooks: User sets up the endpoint to which GitLab will post incident updates**\n![Webhook](https://about.gitlab.com/images/blogimages/status-page/webhook.png){: .center}\n\n**Option 2: Alerts coming directly from Prometheus Alertmanager**\n![Alerts](https://about.gitlab.com/images/blogimages/status-page/alerts.png){: .center}\n\n**Option 3: Status page itself monitoring other services**\n![Monitoring](https://about.gitlab.com/images/blogimages/status-page/monitoring.png){: .center}\n\n**Option 4: Users manually pushing a markdown file to git or calling the API with some utility, e.g., `curl`**\n![Git Commit](https://about.gitlab.com/images/blogimages/status-page/gitcommit.png){: .center}\n\n**Option 5: CI job running manually or scheduled to run during certain intervals**\n![CI Job](https://about.gitlab.com/images/blogimages/status-page/cijob.png){: .center}\n\nThose approaches required either manual user input, additional CI resources, or building a sophisticated piece of software that was unnecessary for this case.\n\nWe didn't implement any of the five flows. Instead, we decided that the incident issue would be converted to JSON and published to the Status Page by a background job. This means no over-engineering and instant feedback on the Status Page.
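\n\nTo give a feel for the shape of that output, here is a rough sketch of the kind of document the background job might publish for a single incident. The field names are illustrative assumptions rather than the actual Status Page schema, and the real artifacts are JSON files; the sketch is shown in YAML form for readability:\n\n```yaml\n# Illustrative sketch only: hypothetical fields, not the actual schema\n# emitted by the Status Page background job.\nid: 42\nstatus: investigating\ntitle: Elevated error rates on web requests\nupdates:\n  - timestamp: \"2020-04-29T10:15:00Z\"\n    note: We are investigating elevated error rates.  # a comment marked as public\n```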
\n\n#### Frontend\n\nHere at GitLab we love VueJS so much we contribute to it, so the team has great expertise in VueJS. Consequently, our component library [GitLab UI](https://gitlab-org.gitlab.io/gitlab-ui/) and styling utilities are based on VueJS.\n\nYou could guess that we didn't have to debate which frontend framework to use! Besides the UI library as a dependency, GitLab provides `eslint`, `stylelint`, and SVGs as npm packages. It was very convenient to have them handy, as any new project setup always raises lots of questions about best practices and best tools. With all of this, the Status Page was able to be GitLab-branded. Feel free to use GitLab utilities in your own project too.\n\nNotably, the Status Page is a stand-alone application, hosted in a separate GitLab repository, that uses JSON files generated by a background job. It is distributed under the MIT license and can be used separately from GitLab, given that a correct data source is provisioned. You'll get the best experience by using our Status Page with GitLab.\n\nThe frontend, along with the generated JSON data sources, is published to [cloud storage](https://www.youtube.com/watch?v=27GgP6BXR6A). We currently only support Amazon S3 because we are hosted on Google Cloud and want our Status Page to be available even if Google Cloud (and, by extension, GitLab.com) is down. Credentials are provided by the user when setting up the incident tracking project for the Status Page.\n\n#### The Status Page solution\n\nOnce an incident issue is created/updated in GitLab (manually or via [alert](https://docs.gitlab.com/ee/operations/incident_management/index.html#incident-management)), its description (with all types of data) along with comments that were marked as public will be picked up by a background job, converted to JSON, and mirrored on the Status Page.\n\n![Status Page flow](https://about.gitlab.com/images/blogimages/status-page/status-page-flow.png){: .center}\n\n### Hat tip to our Monitor:Health team\n\nThere are many more technical details that could be explained, and more that are still to be implemented. It is the collaborative effort of the [Monitor:Health team](/handbook/engineering/development/ops/monitor/respond/) that helps make this possible. I'm thankful for all the heated discussions, great insights, quick iterations, and fast failures; the collaboration within the Monitor:Health team has paid off in the implementation of the Status Page feature.\n\n### Give the Status Page a try\n\nHere's a great [step-by-step guide](https://docs.gitlab.com/ee/operations/incident_management/status_page.html) on how to set up a Status Page for your project with GitLab.\n\nEnjoy and may all your systems be operational!\n\nCover image by [Melina Yakas](https://unsplash.com/@myakas16) on [Unsplash](https://unsplash.com/photos/OBWEXPOurWo)\n{: .note}\n",[894],{"slug":5357,"featured":6,"template":678},"how-we-built-status-page-mvc","content:en-us:blog:how-we-built-status-page-mvc.yml","How We Built Status Page Mvc","en-us/blog/how-we-built-status-page-mvc.yml","en-us/blog/how-we-built-status-page-mvc",{"_path":5363,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5364,"content":5370,"config":5376,"_id":5378,"_type":16,"title":5379,"_source":17,"_file":5380,"_stem":5381,"_extension":20},"/en-us/blog/parent-child-pipelines",{"title":5365,"description":5366,"ogTitle":5365,"ogDescription":5366,"noIndex":6,"ogImage":5367,"ogUrl":5368,"ogSiteName":692,"ogType":693,"canonicalUrls":5368,"schema":5369},"How to get started with Parent-child pipelines","We introduced improvements to pipelines to help scale applications and their repo structures more effectively. 
Here's how they work.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667040/Blog/Hero%20Images/parent_pipeline_graph.png","https://about.gitlab.com/blog/parent-child-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get started with Parent-child pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Ward\"}],\n        \"datePublished\": \"2020-04-24\",\n      }",{"title":5365,"description":5366,"authors":5371,"heroImage":5367,"date":5373,"body":5374,"category":14,"tags":5375},[5372],"Chris Ward","2020-04-24","As applications and their repository structures grow in complexity, a repository `.gitlab-ci.yml` file becomes difficult to manage, collaborate on, and see benefit from. This problem is especially true for the increasingly popular \"[monorepo](https://en.wikipedia.org/wiki/Monorepo)\" pattern, where teams keep code for multiple related services in one repository. Currently, when using this pattern, developers all use the same `.gitlab-ci.yml` file to trigger different automated processes for different application components, likely causing merge conflicts and productivity slowdowns while teams wait for \"their part\" of a pipeline to run and complete.\n\nTo help large and complex projects manage their automated workflows, we've added two new features to make pipelines even more powerful: Parent-child pipelines, and the ability to generate pipeline configuration files dynamically.\n\n## Meet Parent-child pipelines\n\nSo, how do you solve the pain of many teams collaborating on many inter-related services in the same repository? Let me introduce you to [Parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html), released with [GitLab 12.7](/releases/2020/01/22/gitlab-12-7-released/#parent-child-pipelines). Splitting complex pipelines into multiple pipelines with a parent-child relationship can improve performance by allowing child pipelines to run concurrently. This relationship also enables you to compartmentalize configuration and visualization into different files and views. \n\n### Creating a child pipeline\n\nYou trigger a child pipeline configuration file from a parent by including it with the `include` key as a parameter to the `trigger` key. You can name the child pipeline file whatever you want, but it still needs to be valid YAML.\n\nThe parent configuration below triggers two further child pipelines that build the Windows and Linux versions of a C++ application. The application itself is trivial:\n\n```cpp\n#include \u003Ciostream>\nint main()\n{\n  std::cout \u003C\u003C \"Hello GitLab!\" \u003C\u003C std::endl;\n  return 0;\n}\n```\n\nThe setup is a simple one but hopefully illustrates what is possible. Here is the parent configuration:\n\n```yaml\nstages:\n  - triggers\n\nbuild_windows:\n  stage: triggers\n  trigger:\n    include: .win-gitlab-ci.yml\n  rules:\n    - changes:\n      - cpp_app/*\n\nbuild_linux:\n  stage: triggers\n  trigger:\n    include: .linux-gitlab-ci.yml\n  rules:\n    - changes:\n      - cpp_app/*\n```\n\nThe important values are the `trigger` keys, which define the child configuration files to run; the parent pipeline continues to run after triggering them. You can use all the normal sub-methods of `include` to use local, remote, or template config files, up to a maximum of three child pipelines.\n\nAnother useful pattern to use for parent-child pipelines is a `rules` key to trigger a child pipeline under certain conditions. In the example above, the child pipelines only trigger when changes are made to files in the _cpp_app_ folder.
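\n\nFor instance, a sketch of a job that only launches its child pipeline for commits on the default branch might look like this (`build_docs` and `.docs-gitlab-ci.yml` are made-up names for illustration):\n\n```yaml\n# A sketch of another rules-based trigger: this hypothetical job only\n# launches its child pipeline for the default branch.\nbuild_docs:\n  stage: triggers\n  trigger:\n    include: .docs-gitlab-ci.yml\n  rules:\n    - if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'\n```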
\n\nThe Windows build child pipeline (`.win-gitlab-ci.yml`) has the following configuration, and unless you want to trigger a further child pipeline, it follows a standard configuration format:\n\n```yaml\nimage: gcc\nbuild:\n  stage: build\n  before_script:\n    - apt update && apt-get install -y mingw-w64\n  script:\n    - x86_64-w64-mingw32-g++ cpp_app/hello-gitlab.cpp -o helloGitLab.exe\n  artifacts:\n    paths:\n      - helloGitLab.exe\n```\n\nDon't forget the `-y` argument as part of the `apt-get install` command, or your jobs will be stuck waiting for user input.\n\nThe Linux build child pipeline (`.linux-gitlab-ci.yml`) has the following configuration, and unless you want to trigger a further child pipeline, it follows a standard configuration format:\n\n```yaml\nimage: gcc\nbuild:\n  stage: build\n  script:\n    - g++ cpp_app/hello-gitlab.cpp -o helloGitLab\n  artifacts:\n    paths:\n      - helloGitLab\n```\n\nIn both cases, the child pipeline generates an artifact you can download under the _Job artifacts_ section of the Job result screen.\n\nPush all the files you created to a new branch, and for the pipeline result, you should see the two jobs and their subsequent child jobs.\n\n![Parent-child pipeline result](https://about.gitlab.com/images/blogimages/non-dynamic-pipelines.png){: .shadow.medium.center}\nThe result of a parent-child pipeline\n{: .note.text-center}\n\n## Dynamically generating pipelines\n\nTaking Parent-child pipelines even further, you can also dynamically generate the child configuration files from the parent pipeline. Doing so keeps repositories clean of scattered pipeline configuration files and allows you to generate configuration in your application, pass variables to those files, and much more.\n\nLet's start with the parent pipeline configuration file:\n\n```yaml\nstages:\n  - setup\n  - triggers\n\ngenerate-config:\n  stage: setup\n  script:\n    - ./write-config.rb\n    - git status\n    - cat .linux-gitlab-ci.yml\n    - cat .win-gitlab-ci.yml\n  artifacts:\n    paths:\n      - .linux-gitlab-ci.yml\n      - .win-gitlab-ci.yml\n\ntrigger-linux-build:\n  stage: triggers\n  trigger:\n    include:\n      - artifact: .linux-gitlab-ci.yml\n        job: generate-config\n\ntrigger-win-build:\n  stage: triggers\n  trigger:\n    include:\n      - artifact: .win-gitlab-ci.yml\n        job: generate-config\n```\n\nDuring our self-defined `setup` stage, the pipeline runs the `write-config.rb` script. For this article, it's a Ruby script that writes the child pipeline config files, but you can use any scripting language. The child pipeline config files are the same as those in the non-dynamic example above. 
We use `artifacts` to save the generated child configuration files for this CI run, making them available for use in the child pipeline stages.\n\nAs the Ruby script is generating YAML, make sure the indentation is correct, or the pipeline jobs will fail.\n\n```ruby\n#!/usr/bin/env ruby\n\nlinux_build = \u003C\u003C~YML\n    image: gcc\n    build:\n        stage: build\n        script:\n            - g++ cpp_app/hello-gitlab.cpp -o helloGitLab\n        artifacts:\n            paths:\n                - helloGitLab\nYML\n\nwin_build = \u003C\u003C~YML\n    image: gcc\n    build:\n        stage: build\n        before_script:\n            - apt update && apt-get install -y mingw-w64\n        script:\n            - x86_64-w64-mingw32-g++ cpp_app/hello-gitlab.cpp -o helloGitLab.exe\n        artifacts:\n            paths:\n                - helloGitLab.exe\nYML\n\nFile.open('.linux-gitlab-ci.yml', 'w'){ |f| f.write(linux_build)}\nFile.open('.win-gitlab-ci.yml', 'w'){ |f| f.write(win_build)}\n```\n\nThen in the `triggers` stage, the parent pipeline runs the generated child pipelines much as in the non-dynamic version of this example, but using the saved `artifact` files and the specified `job`.\n\nPush all the files you created to a new branch, and for the pipeline result, you should see the three jobs (with one connecting to the two others) and the subsequent two children.\n\n![Dynamic parent-child pipeline result](https://about.gitlab.com/images/blogimages/dynamic-pipelines.png){: .shadow.medium.center}\nThe result of a dynamic parent-child pipeline\n{: .note.text-center}\n\n## Pipeline flexibility\n\nThis blog post showed some simple examples to give you an idea of what you can now accomplish with pipelines. With one parent, multiple children, and the ability to generate configuration dynamically, we hope you find all the tools you need to [build CI/CD workflows](/topics/ci-cd/).\n\nYou can also watch a demo of Parent-child pipelines below:\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/n8KpBSqZNbk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[110,749,915,726],{"slug":5377,"featured":6,"template":678},"parent-child-pipelines","content:en-us:blog:parent-child-pipelines.yml","Parent Child Pipelines","en-us/blog/parent-child-pipelines.yml","en-us/blog/parent-child-pipelines",{"_path":5383,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5384,"content":5390,"config":5395,"_id":5397,"_type":16,"title":5398,"_source":17,"_file":5399,"_stem":5400,"_extension":20},"/en-us/blog/why-gitops-should-be-workflow-of-choice",{"title":5385,"description":5386,"ogTitle":5385,"ogDescription":5386,"noIndex":6,"ogImage":5387,"ogUrl":5388,"ogSiteName":692,"ogType":693,"canonicalUrls":5388,"schema":5389},"Why GitOps should be the workflow of choice","What is GitOps and how do you apply it in real-world applications?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681239/Blog/Hero%20Images/shiro-hatori-WR-ifjFy4CI-unsplash.jpg","https://about.gitlab.com/blog/why-gitops-should-be-workflow-of-choice","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitOps should be the workflow of choice\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-04-17\",\n      
}",{"title":5385,"description":5386,"authors":5391,"heroImage":5387,"date":5392,"body":5393,"category":14,"tags":5394},[2558],"2020-04-17","\n\n## How did we get here?\n\nIn 2006, with the launch of AWS Elastic Compute, Amazon set off a revolution in the way we, as developers, consume and use compute and other resources required to deploy and maintain the applications we write. Not long after, infrastructure-as-code started to explode onto the scene with projects like Puppet, Ansible, and Terraform.\n\nAs these technologies matured, it became apparent that scaling applications in a modern or cloud environment required reproducible, reusable components, and infrastructure-as-code became the gold standard for ensuring the proper allocation of resources to an application. At the same time, the infrastructure space and world of software continued to evolve. The concept of [continuous delivery](/topics/ci-cd/) and release of software came into vogue and was popularized by large technology companies. The \"book\" on continuous delivery came in 2011, where it became apparent that to move fast enough to keep up with market demands, a radically [faster DevOps](/topics/devops/) cycle was required.\n\nAs continuous delivery for software becomes more commonplace, new solutions in the infrastructure space have been created to keep up. Kubernetes and the rise of [\"serverless\"](/topics/serverless/) promised to once again free developers from the need to worry about infrastructure. In a post-DevOps world - how does one think about infrastructure-as-code and applications as one cohesive unit?  Enter GitOps.\n\n## What is GitOps?\n\n[GitOps](/topics/gitops/) is conceptually not that different from either infrastructure-as-code or continuous delivery. In fact, in many ways, it is the convergence of those two concepts. Developers and operations teams alike can share a common repository of code, and GitOps allows a developer-like experience for managing applications and their underlying infrastructure. In that way, you can use GitOps as an operating model for modern infrastructures like Kubernetes, serverless, and other cloud native technologies.\n\nVersion control and [continuous integration](/solutions/continuous-integration/) are essential tools for deploying software continuously and reliably. GitOps brings both of those software best practices to operations by making the repository the central-source-of-truth for all of the infrastructure required to run applications. With GitOps, any change to infrastructure is committed to the git repository along with any application changes.\n\nThis allows developers and operators to use familiar development patterns and branching strategies. From there, a merge request provides the [central place to collaborate](/topics/gitops/gitops-gitlab-collaboration/) and suggest changes. Once merged into the mainline, CI/CD should be configured to deploy both the application and infrastructure changes automatically. The way this enables synchronization between developers and operators is what can be very appealing about GitOps as the next iteration of DevOps.\n\n## Why GitOps?\n\nWhy are so many organizations large and small considering a move to a more GitOps-focused culture?\n\nAs software has eaten the world, business operational excellence has become directly aligned with the ability to deliver quality software faster. Business survival depends on adaptive and efficient software development practices. 
Those practices require new processes and changes in the way we think about change management.\n\nIn many software practices, the concept of code review and approval is where most of the checks and balances for deploying production code come into play. At GitLab, we believe that the [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/) is the best place to collaborate on code and approve changes. Processes and tools that are external to the code change only serve to increase cycle time and inhibit an organization’s ability to deploy code quickly.\n\nOnce an organization has embraced continuous integration and code review as the place for change request approval, it is a natural progression to discuss the idea of continuous delivery to production after those CI gates and human approvals are passed. As GitOps takes that concept a step further and integrates the pipeline to production directly in the git and merge request workflow, it’s become a hot topic and one that will become the normal workflow for efficient software organizations. Taking unnecessary steps and tools out of the critical path to production enables an organization to deliver better products faster, without sacrificing the governance required to deploy code.\n\nCover image by [Shiro Hatori](https://unsplash.com/@shiroscope) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[110,873,232],{"slug":5396,"featured":6,"template":678},"why-gitops-should-be-workflow-of-choice","content:en-us:blog:why-gitops-should-be-workflow-of-choice.yml","Why Gitops Should Be Workflow Of Choice","en-us/blog/why-gitops-should-be-workflow-of-choice.yml","en-us/blog/why-gitops-should-be-workflow-of-choice",{"_path":5402,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5403,"content":5409,"config":5416,"_id":5418,"_type":16,"title":5419,"_source":17,"_file":5420,"_stem":5421,"_extension":20},"/en-us/blog/geo-is-available-on-staging-for-gitlab-com",{"title":5404,"description":5405,"ogTitle":5404,"ogDescription":5405,"noIndex":6,"ogImage":5406,"ogUrl":5407,"ogSiteName":692,"ogType":693,"canonicalUrls":5407,"schema":5408},"Why we enabled Geo on the staging environment for GitLab.com","Geo is GitLab's solution for distributed teams and now we can validate and test it at scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669656/Blog/Hero%20Images/donald-giannatti-4qk3nQI3WHY-unsplash-small.jpg","https://about.gitlab.com/blog/geo-is-available-on-staging-for-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we enabled Geo on the staging environment for GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabian Zimmer\"},{\"@type\":\"Person\",\"name\":\"Douglas Alexandre\"}],\n        \"datePublished\": \"2020-04-16\",\n      }",{"title":5404,"description":5405,"authors":5410,"heroImage":5406,"date":5413,"body":5414,"category":14,"tags":5415},[5411,5412],"Fabian Zimmer","Douglas Alexandre","2020-04-16","\nWe're testing Geo at scale on GitLab.com – our largest installation of GitLab – because we believe the best way to guarantee that Geo works as expected is to [use it ourselves](/handbook/product/product-processes/#dogfood-everything).\n\nGeo is GitLab's [solution for distributed teams](https://docs.gitlab.com/ee/administration/geo/index.html). We want teams all over the world to have a great user experience - independent of how far away users are from their primary GitLab installation. 
To accomplish this goal, read-only Geo nodes can be created across the world in close geographical proximity to your teams. These Geo nodes replicate important data, such as projects or LFS files, from the primary GitLab instance and thereby make the data available to users. Geo can also be used as part of a disaster recovery strategy because it adds data redundancy. Geo nodes follow the primary installation closely and allow customers to fail over to such a node in case the primary node becomes unavailable.\n\nMany of GitLab's customers use Geo on self-managed installations that serve hundreds to thousands of users. Geo is a critical component of GitLab installations and our customers expect Geo to work at any scale. We are testing Geo at scale on our GitLab.com installation because if it works for us, chances are it will work for our worldwide group of users too.\n\nIn this blog post, we'll explain why and how we chose to enable GitLab Geo on our pre-production environment (from now on referred to as \"staging\"), the challenges we encountered, some of the immediate benefits to our customers, and what will be next.\n\n## Why do we need to use Geo at GitLab?\nIn order to build the best product possible, we believe it is imperative to [use GitLab ourselves](/handbook/product/product-processes/#dogfood-everything). Many of our Geo customers have thousands of users actively using GitLab, and a major challenge for the team was to test and validate new Geo functionality at scale. Enabling Geo on the GitLab.com staging environment makes this task a lot easier.\n\nWe also used Geo to [migrate GitLab.com from Microsoft Azure to Google Cloud in 2018](/blog/moving-to-gcp/), which allowed us to improve the product by identifying bottlenecks. In the last two years, GitLab has grown dramatically and in order to push Geo forward, we need to enable it (again).\n\n### Test Geo at scale\nWhen the team decides to add new functionality to Geo, for example [package repository replication](https://gitlab.com/groups/gitlab-org/-/epics/2346), we have to ensure that the feature's performance is as expected. Having Geo available on staging allows us to deploy these changes behind a feature flag first and evaluate the performance before shipping the feature to customers. This is especially relevant to some of Geo's PostgreSQL database queries. On a small test deployment, things may look fine, but at scale these queries can time out, resulting in replication issues.\n\nWe also deploy code to our staging environment twice a week, which means that any regressions surface before a new packaged release.\n\n### Prove that Geo can be deployed as part of our production infrastructure\nA large amount of automation is required to run GitLab.com with millions of users, and our SRE team is constantly improving how we run GitLab.com. The first step in bringing Geo into our production environment is to deploy Geo as a part of our staging environment. Without the right monitoring, runbooks, and processes in place, it would not be possible to move Geo into production where it could be used to enable geo-replication and/or as part of our disaster recovery strategy.\n\n## Setting up Geo on staging\n\nSetting up Geo on staging had some unique challenges; you can get a detailed overview in our [Geo on staging documentation](/handbook/engineering/development/enablement/systems/geo/staging.html).\n\nIn order to deploy Geo, we opted for a minimally viable approach that is sufficient for a first iteration. 
Geo is currently deployed as a single all-in-one box, not yet as a [Geo high-availability configuration](https://docs.gitlab.com/ee/administration/geo/replication/multiple_servers.html). Geo deploys happen automatically via Chef, similar to any other part of the infrastructure.\n\n![Geo staging Diagram](https://about.gitlab.com/images/blogimages/geo-on-staging/geo_staging_diagram.png){: .shadow.medium.center}\n\nWe currently replicate only a subset of data using [Geo's selective synchronization feature](https://docs.gitlab.com/ee/administration/geo/replication/configuration.html#selective-synchronization), which also allows us to dogfood this feature. Selective synchronization uses a number of complex database queries, and this helps us validate those at scale. We chose to replicate the `gitlab-org` group, which contains most of GitLab's projects (including [GitLab](https://gitlab.com/gitlab-org/gitlab) itself).\n\nWe also needed to configure Geo to use the same logical [Gitaly shards](https://docs.gitlab.com/ee/administration/repository_storage_paths.html) on the secondary node as on the primary node. We'll [improve our Geo documentation](https://gitlab.com/gitlab-org/gitlab/-/issues/213840) to ensure it is clear when this is required.\n\nA logical Gitaly shard is an entry in the GitLab configuration file that points to a path on the file system and a Gitaly address:\n\n```\n\"git_data_dirs\": {\n  \"default\": {\n    \"path\": \"/var/opt/gitlab/git-data-file01\",\n    \"gitaly_address\": \"unix:/var/opt/gitlab/gitaly/gitaly.socket\"\n  }\n}\n```\n\nIn the example above, we have only one logical shard identified by the key `default`, but we could have as many as needed.\nEvery project on GitLab is associated with a logical Gitaly shard, which means that we know where all relevant data (repositories, uploads, etc.) is stored. A project `example` that is associated with the logical Gitaly shard `default` would therefore be stored at `/var/opt/gitlab/git-data-file01`, and the Gitaly server would be available at `unix:/var/opt/gitlab/gitaly/gitaly.socket`.\n\nThis information is stored in the PostgreSQL database, and in order for Geo to replicate projects successfully, we needed to create the same Gitaly shard layout. On the Geo secondary node, we are using only one physical shard to store the data for all projects. To allow it to replicate any project from the primary node, we had to point all the logical Gitaly shards to the same physical shard on the secondary node.
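\n\nAs an illustration (the second shard name and paths here are hypothetical, not our actual staging layout), the secondary's configuration keeps the same logical shard keys as the primary, with each one resolving to the same physical path and Gitaly address:\n\n```\n\"git_data_dirs\": {\n  \"default\": {\n    \"path\": \"/var/opt/gitlab/git-data\",\n    \"gitaly_address\": \"unix:/var/opt/gitlab/gitaly/gitaly.socket\"\n  },\n  \"storage01\": {\n    \"path\": \"/var/opt/gitlab/git-data\",\n    \"gitaly_address\": \"unix:/var/opt/gitlab/gitaly/gitaly.socket\"\n  }\n}\n```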
\n\nGeo on staging is configured to use [cascading streaming replication](https://www.postgresql.org/docs/current/warm-standby.html#CASCADING-REPLICATION), which allows one standby node in the staging [Patroni cluster](https://github.com/zalando/patroni) to act as a relay and stream write-ahead logs (WAL) to the Geo secondary. This setup has the advantage that Geo can't put additional load onto the primary database node, and we avoid physical replication slots to further reduce the load. [Patroni will likely be supported in Omnibus packages](https://gitlab.com/groups/gitlab-org/-/epics/2588) and we will review these settings to allow our customers to benefit from this setup.\n\nPostgreSQL will automatically fall back on its `restore_command` to pull archived WAL segments using [wal-e](https://github.com/wal-e/wal-e) if it cannot retrieve a segment by streaming replication. This can happen after a failover, or if the upstream node has already deleted the relevant segment because Geo is lagging behind it.\n\nIn the future, we will use this to experiment with [high-availability configurations of PostgreSQL on a secondary Geo node](https://gitlab.com/groups/gitlab-org/-/epics/2536).\n\n## What we learned and how we can improve\n\nWe opened [23 issues before successfully rolling out Geo on our staging environment](https://gitlab.com/groups/gitlab-org/-/epics/1908) - this is too many. We know that installing and configuring Geo in complex environments is time-consuming and error-prone, and is an area where we can improve. The current process for a self-managed installation requires [more than 70 individual steps](https://gitlab.com/gitlab-org/gitlab-design/issues/731) - this is too much. [Geo should be simple to install](https://gitlab.com/groups/gitlab-org/-/epics/1465) and we aim to reduce the number of steps to below 10. Using Geo ourselves really underscored the importance of improvements in this area.\n\n### Some Geo PostgreSQL queries don't perform well\n\nGeo uses PostgreSQL Foreign Data Wrappers (FDW) to perform some cross-database queries between the secondary replica and the tracking database. FDW queries are quite elegant but have led to some issues in the past. Specifically, staging is still running PostgreSQL 9.6, and Geo benefits from some FDW improvements available only in PostgreSQL 10 and later, such as join push-down and aggregate push-down.\n\nWhile enabling Geo on staging, some FDW queries timed out during the backfill phase. Until staging is upgraded to a newer version of PostgreSQL, increasing the statement timeout to 20 minutes on the Geo secondary node was sufficient to allow us to proceed with the backfill.\n\nAs a direct consequence of enabling Geo on staging, we are working to [improve Geo scalability by simplifying backfill operations](https://gitlab.com/groups/gitlab-org/-/epics/2851), eliminating these cross-database queries, and removing the FDW requirement. We also plan to [upgrade to PostgreSQL 11 in GitLab 13.0](https://gitlab.com/groups/gitlab-org/-/epics/2414).\n\n### Bug fixes\nWe've also discovered and fixed a number of bugs in the process, such as [failing to synchronize uploads with missing mount points](https://gitlab.com/gitlab-org/gitlab/-/issues/209752), [invalid ActiveRecord operations](https://gitlab.com/gitlab-org/gitlab/-/issues/210589), and [excessively re-synchronizing files in some situations](https://gitlab.com/gitlab-org/gitlab/-/issues/207808).\n\n## What's next?\nWe are already providing value to our customers by enabling Geo on staging because the Geo team can test and validate Geo at scale a lot more easily. Next up is enabling [automatic runs of our end-to-end test on staging](https://gitlab.com/gitlab-org/quality/team-tasks/issues/385), which would reduce the manual testing burden even further. There are also some other improvements, such as [enabling high-availability configurations of PostgreSQL using Patroni on Geo nodes](https://gitlab.com/groups/gitlab-org/-/epics/2536), that we would like to test on staging.\n\nEven though enabling Geo on staging is already very useful, it is just one step toward rolling out Geo on GitLab.com in production. 
We are currently evaluating the business case for enabling Geo on GitLab.com as part of our disaster recovery strategy and for geo-replication.\n\nCover image by [Donald Giannatti](https://unsplash.com/photos/4qk3nQI3WHY) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[749,915,728],{"slug":5417,"featured":6,"template":678},"geo-is-available-on-staging-for-gitlab-com","content:en-us:blog:geo-is-available-on-staging-for-gitlab-com.yml","Geo Is Available On Staging For Gitlab Com","en-us/blog/geo-is-available-on-staging-for-gitlab-com.yml","en-us/blog/geo-is-available-on-staging-for-gitlab-com",{"_path":5423,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5424,"content":5429,"config":5434,"_id":5436,"_type":16,"title":5437,"_source":17,"_file":5438,"_stem":5439,"_extension":20},"/en-us/blog/conan-c-cpp-package-management-integration",{"title":5425,"description":5426,"ogTitle":5425,"ogDescription":5426,"noIndex":6,"ogImage":4861,"ogUrl":5427,"ogSiteName":692,"ogType":693,"canonicalUrls":5427,"schema":5428},"Modern C and C++: How Conan integration works in GitLab","Conan is a leading C and C++ package manager and it is now available in GitLab. Store and share packages easily with your teams or publicly.","https://about.gitlab.com/blog/conan-c-cpp-package-management-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Modern C and C++: How Conan integration works in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jordi Mon\"}],\n        \"datePublished\": \"2020-03-31\",\n      }",{"title":5425,"description":5426,"authors":5430,"heroImage":4861,"date":5431,"body":5432,"category":14,"tags":5433},[4925],"2020-03-31","\n\nAs a single application for the entire software development and delivery lifecycle, GitLab strives to support all the different software workflows and pipelines. Regardless of how complex this cycle might be (I’m looking at you, C++), what we want to do is soothe these pains for C and C++ GitLab users. Following up on this metaphor, as doctors we would like to listen to the patient first: It all started with our community explaining their symptoms and chipping in the first ideas [here](https://gitlab.com/gitlab-org/gitlab-foss/issues/54747). This became even more relevant for GitLab when clients in C++-reliant industries like finance, robotics, or embedded software expressed their interest in package management support for C++.\n\n### Conan is now available on GitLab\n\nThe C and C++ ecosystems have a ton of legacy tooling. It is what it is: they’ve been around for a long time and the community is, in a way, very DIY-driven. For example, many C++ libraries are advertised as “Zero deps inside.” This badge is intended as a sign of quality, and is even a bit of a status symbol for the devs and maintainers. That's fine for C/C++ developers, but what about the users of such libs? Regardless of the actual quality of the lib’s code, if you wanted to use any of them, you’d better have a local, updated copy of them in a Git submodule. This is especially relevant for header-only monsters like Boost, the most popular set of libs in C++. In other words, in order to make use of them (that’s why they were created in the first place, I guess), you basically have to download the [source code](/solutions/source-code-management/), build it yourself (good luck with that), compile it and include the resulting binary in your project. 
This process can be time-consuming and, if build processes are not well documented or supported, it can be exasperating. All of this can become a real nightmare if transitive dependencies are present, or if different [version control systems](/topics/version-control/) have been used. It's also tricky when deciding upon static or dynamic binaries, static or dynamic linking, single or multi-threaded, 32-bit or 64-bit…\n\n### How to build C and C++ packages in GitLab the Conan way\n\nThe GitLab Conan integration allows Conan users to set GitLab as the remote registry for their packages. Users are able to set the remote, then upload and install packages from GitLab’s registry. Think of it this way: you still use the same CLI to work with your Conan packages, but GitLab is on the receiving end. In doing so, GitLab creates the unique opportunity to have the code and the package generated from that code living in the same place, freeing users from having to manage multiple services to store packages and code separately while still having them work together. This allows users to share private packages within an organization that is already using GitLab, publish public packages for general or open source use, and opens up many possibilities for utilizing GitLab’s CI pipelines to build and consume these packages automatically.
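\n\nTo sketch what that CI-driven flow could look like (the package reference, remote URL, and build image below are illustrative assumptions, and the registry enforces naming conventions tied to your project, so check the GitLab and Conan docs for the exact commands your versions expect), a job that publishes a package to GitLab's Conan registry might resemble:\n\n```yaml\n# A sketch only, not an official template: add GitLab as a Conan remote,\n# authenticate with the CI job token, then build and upload a package.\npublish-package:\n  image: conanio/gcc9  # illustrative build image\n  stage: deploy\n  script:\n    - conan remote add gitlab https://gitlab.example.com/api/v4/packages/conan\n    - conan user ci_user -r gitlab -p $CI_JOB_TOKEN\n    - conan create . mycompany/stable\n    - conan upload \"hello/0.1@mycompany/stable\" --all --remote=gitlab\n```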
\n\nCheck out a full demo:\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/2VVmrKNpC_0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nOr watch a speedrun of Conan performed by the team in charge of the integration:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/7NYgJWg-w5w\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIf you need more help, you can always refer to the [Conan docs](https://docs.conan.io/en/latest/).\n\n### The future of C and C++ in GitLab: Game development workflows!\n\nWhat’s coming next? In keeping with GitLab’s value of iteration, the initial release of Conan is a bare-bones API that allows you to publish and consume packages within GitLab. Next up will be a UI that displays much of the commonly referenced metadata for a given package, pre-written CI templates for automatic package publishing and consuming, less strict package naming conventions with remotes scoped to the group and project level within GitLab, and the list goes on.\n\n* [Conan Repository User Interface](https://gitlab.com/gitlab-org/gitlab/issues/33892)\n* [Project and Group level support for Conan Repository](https://gitlab.com/gitlab-org/gitlab/issues/11679)\n\nIf you are interested in package management at large, find a list of publicly available issues about the topic [here](https://gitlab.com/gitlab-org/gitlab/issues?label_name=Package+Repositories). Also, please note that if game development is your interest, large file support, partial clone, and many other features that make game development possible with Git will soon be available in GitLab. All the heavy lifting required for those massive binaries, engines, and animations will feel like feathers when we release those features. Stay tuned to our newsletter to learn more.\n\n",[749,232,703,727],{"slug":5435,"featured":6,"template":678},"conan-c-cpp-package-management-integration","content:en-us:blog:conan-c-cpp-package-management-integration.yml","Conan C Cpp Package Management Integration","en-us/blog/conan-c-cpp-package-management-integration.yml","en-us/blog/conan-c-cpp-package-management-integration",{"_path":5441,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5442,"content":5448,"config":5453,"_id":5455,"_type":16,"title":5456,"_source":17,"_file":5457,"_stem":5458,"_extension":20},"/en-us/blog/gitlab-ci-on-google-kubernetes-engine",{"title":5443,"description":5444,"ogTitle":5443,"ogDescription":5444,"noIndex":6,"ogImage":5445,"ogUrl":5446,"ogSiteName":692,"ogType":693,"canonicalUrls":5446,"schema":5447},"GitLab CI/CD on Google Kubernetes Engine in 15 minutes or less","Install GitLab's Runner on GKE in a few simple steps and get started with GitLab CI/CD pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667003/Blog/Hero%20Images/gke_in_15_cover_2.jpg","https://about.gitlab.com/blog/gitlab-ci-on-google-kubernetes-engine","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CI/CD on Google Kubernetes Engine in 15 minutes or less\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Elliot Rushton\"}],\n        \"datePublished\": \"2020-03-27\",\n      }",{"title":5443,"description":5444,"authors":5449,"heroImage":5445,"date":5450,"body":5451,"category":14,"tags":5452},[5045],"2020-03-27","If you use [GitLab Self-Managed](/pricing/#self-managed), then getting started with GitLab CI using [GitLab's integration with Google Kubernetes Engine (GKE)](/partners/technology-partners/google-cloud-platform/) can be accomplished in a few simple steps. We have several blog posts and documentation that provide detailed [setup instructions for working with Kubernetes clusters](#other-resources). In this post, we highlight the essential steps so that you can get going with GitLab CI/CD in less than 15 minutes.\n\nBy using the GitLab and GKE integration, with one click, you install GitLab Runners on GKE and immediately start running your CI pipelines. Runners are the lightweight agents that execute the CI jobs in your [GitLab CI/CD](/topics/ci-cd/) pipeline.\n\n## Prerequisites\n\nThe following prerequisites need to be configured in order for you to use the built-in GitLab GKE integration:\n- GitLab instance installed and configured with user credentials\n- [Google OAuth2 OmniAuth Provider](https://docs.gitlab.com/ee/integration/google.html) installed and configured on your GitLab instance\n- A Google Cloud project with the following [APIs enabled](https://docs.gitlab.com/ee/integration/google.html#enabling-google-oauth):\n  - Google Kubernetes Engine API\n  - Cloud Resource Manager API\n  - Cloud Billing API\n\n## Get started\n\n![Setup pipeline](https://about.gitlab.com/images/blogimages/ci-gke-in-15/gke_in_15_pipeline.png){: .shadow.medium.center}\n\n### Step 1\n\nWe’re going to add a shared runner at the instance level. 
First, as an administrator, click the “Admin Area” icon.\n\n![Runner setup step 1](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_001.png){: .shadow.medium.center}\n\nThen on the left menu, select “Kubernetes”.\n\n![Runner setup step 2](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_002.png){: .shadow.medium.center}\n\n### Step 2\n\nClick the green “Add Kubernetes cluster” button.\n\n![Runner setup step 3](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_003.png){: .shadow.medium.center}\n\n### Step 3\n\nThe screen to “Add a Kubernetes cluster integration” should come up. Click on the “Google GKE” icon on the right.\n\n![Runner setup step 4](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_004.png){: .shadow.medium.center}\n\n### Step 4\n\nGive your cluster a name, and select a “Google Cloud Platform project” from your linked GCP account. If no projects are populated in the menu, then either your Google OAuth2 integration isn’t configured correctly or your project is missing the needed permissions. Check that these are set up and that the [APIs mentioned in the prerequisites above](#prerequisites) are enabled.\n\nChoose a zone in which to run your cluster. For the purposes of running CI, the number of nodes in your cluster determines how many simultaneous jobs you can run at a given time. As we are using the built-in GitLab Google Kubernetes integration, you can set a maximum of four nodes. Here we set that to three.\n\nClick “Create Kubernetes Cluster”.\n\n![Runner setup step 5](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_005.png){: .shadow.medium.center}\n\nIt takes a few minutes for the cluster to be created. While it’s happening, you should see a screen like this. You can leave this screen and come back (by going to “Admin Area > Kubernetes > [your cluster name]”).\n\n![Runner setup step 6](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_006.png){: .shadow.medium.center}\n\n### Step 5\n\nOnce the cluster has been created, we need to install two applications. First, install “Helm Tiller” by clicking on the “Install” button next to it.\n\n![Runner setup step 7](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_007.png){: .shadow.medium.center}\n\nThis takes a moment, but should be much quicker than the initial cluster creation.\n\n![Runner setup step 8](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_008.png){: .shadow.medium.center}\n\n### Step 6\n\nNow that Helm Tiller is installed, more applications can be installed. For this tutorial we only need to install the “GitLab Runner” application. Click the install button next to GitLab Runner.\n\n![Runner setup step 9](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_009.png){: .shadow.medium.center}\n\nAgain, this should go pretty quickly.\n\n![Runner setup step 10](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_010.png){: .shadow.medium.center}\n\nOnce done, the button will change to an “Uninstall” button. You’re now set up with shared runners on your GitLab instance and can run your first CI pipeline!\n\n![Runner setup step 11](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_011.png){: .shadow.medium.center}\n\n### Next steps\n\nNow that you are up and running with GitLab CI/CD on GKE, you can build and run your first GitLab CI/CD pipeline.
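\n\nIf you want a quick smoke test first, a minimal `.gitlab-ci.yml` like this sketch (the job name and image are arbitrary) is enough to confirm that jobs are being picked up by the newly installed runners:\n\n```yaml\n# A minimal smoke test (illustrative only): a single job for the new\n# Kubernetes-hosted runners to execute.\nsmoke-test:\n  image: alpine:latest\n  script:\n    - echo \"Hello from a GKE-hosted GitLab Runner\"\n```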
Here are links to a few resources to get you started.\n\n- [Getting Started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/)\n- [How to build a CI/CD pipeline in 20 minutes or less](/blog/building-a-cicd-pipeline-in-20-mins/)\n- [Getting started with Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html)\n\nIf you are planning to manage your own fleet of GitLab Runners, you may also be thinking about how best to set up autoscaling. Since we have just set up your first Runner on GKE, you can review the [GitLab Runner Kubernetes Executor docs](https://docs.gitlab.com/runner/executors/kubernetes.html) for additional details on how GitLab Runner uses Kubernetes to run builds on a cluster.\n\n### Other resources\n\n- [Scalable app deployment webcast](https://about.gitlab.com/webcast/scalable-app-deploy/)\n- [Install GitLab on a cloud native environment](https://docs.gitlab.com/charts/)\n- [Adding and removing Kubernetes clusters](https://docs.gitlab.com/ee/user/project/clusters/add_remove_clusters.html)\n- [Deploy production-ready GitLab on Google Kubernetes Engine](https://cloud.google.com/solutions/deploying-production-ready-gitlab-on-gke)\n\nCover image by [Agê Barros](https://unsplash.com/photos/rBPOfVqROzY) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[232,1002,873,110,1204,728],{"slug":5454,"featured":6,"template":678},"gitlab-ci-on-google-kubernetes-engine","content:en-us:blog:gitlab-ci-on-google-kubernetes-engine.yml","Gitlab Ci On Google Kubernetes Engine","en-us/blog/gitlab-ci-on-google-kubernetes-engine.yml","en-us/blog/gitlab-ci-on-google-kubernetes-engine",{"_path":5460,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5461,"content":5467,"config":5473,"_id":5475,"_type":16,"title":5476,"_source":17,"_file":5477,"_stem":5478,"_extension":20},"/en-us/blog/the-trouble-with-technical-interviews",{"title":5462,"description":5463,"ogTitle":5462,"ogDescription":5463,"noIndex":6,"ogImage":5464,"ogUrl":5465,"ogSiteName":692,"ogType":693,"canonicalUrls":5465,"schema":5466},"The main problem with technical interviews","Forget the coding exercise. Here's how to create realistic scenarios for engineering candidates in technical interviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681148/Blog/Hero%20Images/nycbrooklyn.jpg","https://about.gitlab.com/blog/the-trouble-with-technical-interviews","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The trouble with technical interviews? They aren't like the job you're interviewing for\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-03-19\",\n      }",{"title":5468,"description":5463,"authors":5469,"heroImage":5464,"date":5470,"body":5471,"category":14,"tags":5472},"The trouble with technical interviews? They aren't like the job you're interviewing for",[3676],"2020-03-19","\n\nInterviewing for an engineering job in the tech world can mean [you’ll be asked all sorts of questions](https://stackify.com/devops-interview-questions/). 
Sometimes, the job interview questions can be pretty straightforward: “Tell me about a time that you have implemented an effective monitoring solution for a production system.” Other times, the questions are impossible to answer and designed to spark your creativity: “How many windows are in New York City?” After passing the initial interview, the candidate graduates to the next tier of interviewing: the often-dreaded technical interview.\n\n## What is a technical interview?\n\nA technical interview is one that is conducted to gauge a candidate’s skill level for positions in the information technology, engineering, and science fields. It may also determine how much a candidate knows in more niche areas of a company, such as marketing, sales, and HR.\n\n## How to prepare for a technical interview\n\nProspective engineers often face a challenge when it comes to preparing for the technical interview, largely because there is no playbook for how companies set them up. It’s unclear whether to prepare by memorizing many different topics or by focusing on specific projects. Is it better to practice with a computer or a peer engineer? There are an overwhelming number of resources available online, but with little clarity as to what the standard is for a technical interview and little guidance from the company on what to expect, most of the time engineers start technical interviews in the dark.\n\nInconsistencies in the technical interview process aren’t just a job candidate problem. In fact, many companies struggle to set up a technical interview process that is effective, equitable, and allows the hiring manager to compare candidates. The problem with technical interviewing compounds when a company is experiencing rapid growth.\n\n## What are the challenges of conducting technical interviews at a growing company?\n\n\"Imagine you had a hiring target of doubling your team size and all your interviews are conducted remotely. Welcome to GitLab,\" says Clement Ho, [frontend engineering manager on the Monitor: Health team](/company/team/#ClemMakesApps) at GitLab.\n\n![Hiring chart shows GitLab more than doubled the number of hires from around 400 in 2019 to roughly 1300 by end of 2020](https://about.gitlab.com/images/blogimages/fei_hiringchart.jpg){: .shadow.medium.center}\n\nGitLab more than doubled the number of hires from around 400 in 2019 to roughly 1300 by the end of 2020.\n{: .note.text-center}\n\nWe identified three core challenges with orchestrating technical interviews as GitLab grows.\n\n1. We didn't have enough interviewers for the pipeline of candidates.\n2. Our technical interviewing process was inconsistent and even a little biased.\n3. It was difficult to measure whether or not we were raising the bar.\n\n\"And by raising the bar, I mean making sure each candidate that joins the team makes the team better,\" says Clement.\n\nThese problems are by no means unique to GitLab. Any engineering company that is scaling rapidly will encounter some growing pains when it comes to hiring, and many will end up falling back on some of the typical models for conducting technical interviews.\n\n## The typical technical interview methods\n\nDuring his talk, [\"Using GitLab to Power our Frontend Technical Interviews\" at GitLab Commit San Francisco](https://www.youtube.com/watch?v=jSbCt8b_4ug), Clement explained the four different techniques that are often employed in technical interviews. 
Each method comes with advantages and disadvantages from the perspective of the hiring manager.\n\n## What are good technical interview questions?\n\nA good technical interview needs to be about more than practical skills – it’s about the whole package. A candidate should possess the right coding skills, but they should also be a team and culture fit and be able to discuss developer topics fluently. A technical interview should include both situational interview questions and a skills assessment to discern a candidate’s potential.\n\nThe types of questions to ask can concern a candidate’s technical abilities and background, their career journey so far, and queries specific to the team or company.\n\n## Types of questions asked during a technical interview and their purpose\n\nEven though employers have already reviewed your resume and cover letter, they will want you to flesh that out during the interview to learn more about how you attained those skills. In order to assess your level of experience, they will likely also ask you to provide concrete examples from prior jobs.\n\nMake sure you are prepared: do your research on the company and the types of questions you may be asked. This will help build your confidence and reduce any nervousness you might feel. It’s also an opportunity for you to set yourself apart from other candidates by showcasing your knowledge and additional skills you can bring to the job.\n\nIt is important to be honest about your skill set because that is something employers value. You may find the company will be willing to hire someone who is transparent about the areas where they need to improve and where they’d like to gain more skills.\n\nExamples of common questions to expect in a technical interview:\n\n- What coding languages are you most familiar with?\n- What is your experience with Kubernetes? Please give a specific example.\n- What’s the purpose of continuous integration in an automated build?\n- How have your previous technical roles prepared you for this job?\n- Tell me about a time when you received an unexpected assignment: how did you react, and what did the experience teach you?\n- Please provide more details about your educational background and how it prepared you for this position.\n- How did you go about teaching yourself a necessary technical skill while you were working on a project?\n- What are your strengths, and where do you think you need to improve your skills?\n- Do you have any technical certifications?\n- Please detail the work you did on the project you are most proud of.\n- What are your favorite and least favorite tech tools, and why?\n- What are the pros/cons of working in an agile environment?\n\n### Sample technical interview questions and answers\n\n- **How do you stay current with your technical knowledge and skills?** It’s a good idea to list online content you use to educate yourself, as well as tutorials and conferences you have attended to gain more knowledge. Perhaps you have also worked closely with vendors or attended sessions to learn about new product features.\n- **How do you troubleshoot technical problems?** Discuss the steps you take when working through a problem. This will give employers a sense of how you problem-solve, and it provides a good overview of how well you understand the relevant concepts. Even if you don’t answer a question correctly, it will show the interviewer your process and reasoning, which are also important. 
You can mention resources you use, such as GitLab and Stack Exchange, as well as the developer community and any publications you read for advice.\n- **What is your level of experience with the software programs mentioned on your resume?** Describe how many years you have used the tools, your impressions of them, and mention the companies where you used them, with specific examples.\n- **What programming language are you most proficient in?** You should discuss how you have become proficient in this language and why it is the one you are most comfortable using. You can also cite other languages you are familiar with.\n- **Describe a time you made an error and how you resolved it.** Don’t use an example of an egregious error since that may put you in a negative light. Be sure to emphasize that you took responsibility and acted with integrity, and did whatever it took to resolve the issue.\n\n## What are some soft skills and coding skills to highlight in a technical interview?\n\nA technical interview assesses your technical expertise, coding skills, and ability to fit into a team. However, soft skills are just as important and often aid in the development of more technical skills – particularly in a team setting.\n\nAs the technical interview progresses, be prepared to tackle some questions about soft skills like:\n\n- **Communication skills:** How does the candidate contribute to group discussions, confront problems, or give and receive feedback?\n- **Organizational skills:** What are the ways in which the candidate provides visibility into their work processes and their methods of staying on task?\n- **Collaboration skills:** Are they interested in helping their teammates? What do they think are the keys to successfully navigating a team project? How have they collaborated on past projects?\n- **Creative problem solving:** How do they work through a problem in a project? Do they use both analytical and creative thinking to come up with solutions?\n\n### How to prepare for verbal technical questions\n\nThere are countless articles online that try to prepare job candidates for a verbal technical interview, but whether this method is truly effective for evaluating the technical competency of a software engineer is debatable.\n\nIn the typical scenario, the interviewer asks the candidate to describe a technical concept and tries to measure their fluency in said concept based on the quality of the conversation.\n\nThe advantage of this method is that the interviewer can understand how the candidate communicates, which is of particular importance when the engineering team is all-remote, as is the case at GitLab. The drawback? Being a good communicator does not necessarily mean the candidate knows how to code effectively.\n\n\"So I've interviewed candidates that could talk the talk, but they couldn't really write the code,\" says Clement. \"And that's not a great situation for an engineer to join GitLab.\" Clement’s team has moved away from using verbal technical questions as a method for evaluating candidates.\n\n### Live coding exercises\n\nOne of the more popular methods for evaluating engineers is through live coding. 
While it allows the evaluator to see how engineering candidates answer data structure questions, it also has its disadvantages.\n\nA key advantage of live coding on data structures is that it offers a fairly consistent basis for measurement and evaluation.\n\n\"I can talk to another manager or another interviewer and be able to communicate, 'Hey, this person wasn't able to do a linked list, they got stuck here. They weren't able to understand a runtime efficiency here.' So it's pretty consistent,\" says Clement.\n\nBut facility with data structures is not always the best indicator of engineering ability. Oftentimes engineers with a very traditional background or recent graduates will shine here, while someone who is more senior and able to do a lot of great things, but perhaps not as brushed up on data structures, may struggle.\n\nLive coding interviews probably aren’t going anywhere fast, but the pitfalls of this method are well documented by engineers and hiring managers. Brennan Moore, a product engineer in New York City, explains why he does not conduct live coding interviews when evaluating a prospective candidate:\n\n> \"Much like the SAT when applying for college, live coding is a structured test. I didn’t go to a school that trained me to do live coding, and so will probably fail the test. As I’ve experienced it, live coding isn’t the meritocratic space that it pretends to be. Live coding interviews weed out the people who are good at live coding interviews,\" says Brennan in his [blog post](https://www.zamiang.com/post/why-i-don-t-do-live-coding-interviews).\n\nAt GitLab, we found that live coding exercises don't accurately represent engineering capability. Oftentimes, a recent computer science graduate will outperform a more senior candidate with a lot of valuable experience. In summary, live coding exercises will often disadvantage more senior candidates and people who are nervous in high-pressure situations (read: everyone), and advantage more junior engineers or people who have practiced live coding.\n\n### Digital prompt\n\nA third common method for evaluating candidates is to ask the engineer to code a UI using an online editor while on a screen share with the evaluator.\n\nThe advantage of this method is that it allows the evaluator to observe how a candidate builds. The drawbacks here are similar to those with live coding. First, the engineer is under pressure to build while the evaluator looks on, making it a nerve-wracking situation. The other drawbacks come from an evaluation perspective: it is challenging to measure the effectiveness of this method, and it is hard to compare candidates.\n\n### Take-home project\n\nAny engineer (or writer, for that matter) can tell you that the supplemental take-home project is a very common ask when going through an interview process. The advantage here for us is that this assignment closely mimics the reality of building environments while working remotely at GitLab.\n\nBut this task comes with major drawbacks, mainly that it disadvantages candidates who may not have the time or capacity to complete the project.\n\n\"... imagine a scenario where you're a single parent and you have kids; you may not have as much opportunity to take dedicated time, a couple of hours after work to really focus on a take-home project compared to someone from a more privileged background,\" says Clement. 
\"They might be able to dedicate and output something better.\"\n\n[Diversity and inclusion is a core value](/company/culture/inclusion/) for GitLab, and anything that disadvantages candidates from underrepresented groups is not inclusive, and therefore suboptimal for evaluating candidates based on their engineering abilities.\n\n## What are they looking for during a technical interview?\n\nCompanies want candidates who can discuss the industry in the context of the job they are applying for. Be prepared to discuss examples of your work. Many will want to hear about soft skills, too—your ability to communicate and collaborate and work with others to problem-solve issues.\n\nThey will also want to see how passionate and enthusiastic you are and whether you have the self-motivation to not only do the job but take the initiative to do more than what you’re tasked with.\n\nAlso, interviewers will want to see whether candidates have the desire to increase their technical knowledge.\n\n## What are some online preparation tools and resources for technical interviews\n\n- Indeed offers a career guide to [help prepare for](https://www.indeed.com/career-advice/interviewing/what-is-a-technical-interview) a technical interview.\n- Interview Kickstart has several [webinars](https://learn.interviewkickstart.com/) to help prepare engineers for interviews.\n- Udemy offers a course in [Technical Interview Skills](https://www.udemy.com/course/technical-interview-skills/?utm_source=bing&utm_medium=udemyads&utm_campaign=BG-DSA_Webindex_la.EN_cc.BE&utm_content=deal4584&utm_term=_._ag_1222657343651662_._ad__._kw_udemy_._de_c_._dm__._pl__._ti_dat-2328215871879260%3Aloc-190_._li_103429_._pd__._&matchtype=b&msclkid=9f5132d9c84c17b02f7951a4f46279d6).\n- [Codecademy](https://www.codecademy.com/learn/technical-interview-practice-python?utm_id=t_kwd-79027793284383:loc-190:ag_1264438993811076:cp_370314525:n_o:d_c&msclkid=550de1275d811b2cfc0f82592b6d9626&utm_source=bing&utm_medium=cpc&utm_campaign=US%20Language%3A%20Pro%20-%20Broad&utm_term=%2Btechnical%20%2Binterview%20%2Bprep&utm_content=technical%20interview%20practice) also offers a course called - Technical Interview Practice with Python.\n- Here are some more general [interview tips](https://www.roberthalf.com/blog/job-interview-tips/interview-tips-to-help-you-land-the-job-you-want) that are applicable to all candidates.\n\n## Meaningful questions to ask the interviewer\n\nCandidates will also be given a chance to ask questions they might have to learn more about the company. This is a great opportunity to gain more insight into how the company operates, what its philosophy is, and its vision for the long term.\n\nIt’s also a good way to glean how the company views its IT team. If you don’t ask questions, that could give the impression you are unprepared or not terribly interested in the job.\n\nQuestions to ask can include:\n\n- What does a typical day looks like in this role?\n- Are there opportunities for training and further advancement?\n- What software development methodology do you use?\n- What are your code review practices?\n- Do you have on-call rotations? 
If so, how long is one rotation?\n- What are the responsibilities of the person on call?\n- Please provide more details about the team I will be working with, such as how many people there are, what their roles are, what the hierarchy is, and what areas of improvement you would like to see on the team.\n\n## The new way\n\nWhile each method for conducting a technical interview has advantages, there are also numerous disadvantages when it comes to running an effective, measurable evaluation and creating an equitable interview process. Under the guidance of Clement, the [Monitor: Health team](/handbook/engineering/development/ops/monitor/respond/) decided to interview frontend engineers in an entirely new way using GitLab.\n\nNow let's take a deep dive into the nuts and bolts of reinventing the technical interview for frontend engineers at GitLab. Just wondering about the key takeaways? [Skip ahead](#why-this-new-model-for-technical-interviews-is-better). As we continue to iterate on a more effective and measurable technical interview process, we hope this inspires other engineering organizations to rethink theirs and share learnings with us.\n\nOur first step: Standardize the interview process.\n\n### Fixing an MR on a test project\n\nThe team standardized the interview process by creating an open source test project, called `project-seeder`, which seeds projects to different candidates using a GitLab Bot. Candidates are assigned a merge request to troubleshoot in the project created for the technical interview. The `project-seeder` is powered by the GitLab Bot so the interviewer doesn't have to worry about API keys, and works in four steps:\n\n1. Exports the template project\n2. Imports the template project\n3. Adds users with an expiration date\n4. 
Triggers the pipeline for the candidate to review the MR\n\nThe candidate is sent an email with a link to the MR they are assigned to fix as part of the technical interview.\n\n### Standardize the evaluation rubric\n\nThe team also created a standardized rubric for how the candidate's performance on a technical interview is evaluated.\n\n\"We don't want to be in a situation where unconscious bias or bias of one candidate over another plays a part because of our preconceived notions,\" says Clement.\n\nCreating a rubric that looks at multiple categories allows the evaluator to look at the performance of the candidate from a more holistic perspective, as opposed to looking at a candidate's performance on one technology.\n\nThe team built a [Periscope dashboard](/handbook/engineering/frontend/interview-metrics/) to create a feedback loop between the candidates and evaluators to identify opportunities for improvement in the technical interviewing process.\n\n![Frontend team used Periscope to collect feedback from candidates who participate in technical interviews](https://about.gitlab.com/images/blogimages/fei_periscopedashboard.jpg){: .shadow.medium.center}\n\nThe frontend engineering team used Periscope to collect feedback from candidates who participate in technical interviews.\n{: .note.text-center}\n\n## Demoing the technical interview\n\n### Inside the technical interview project\n\nClement created a sample project to demonstrate how we use GitLab to power our technical interviews.\n\nIn the [gl-commit-example](https://gitlab.com/gl-commit-example) group, there is a subgroup with all the interview projects we are seeding to the imaginary candidates, a template, and a project seeder.\n\n![A screenshot of the sample project shows the interview project's subgroup, template, and project seeder application](https://about.gitlab.com/images/blogimages/fei_interviewproject.jpg){: .shadow.medium.center}\n\nThe interview project's subgroup, template, and project seeder application live inside the sample project for the technical interview.\n{: .note.text-center}\n\n[Inside the template](https://gitlab.com/gl-commit-example/template), there are GitLab Pages and the [interview test merge request](https://gitlab.com/gl-commit-example/template/-/merge_requests/1).\n\nThe assignment here is pretty simple: the candidate needs to update the website to say \"Hello GitLab Commit SF,\" but in order to accomplish this, they will first need to fix the failing pipeline. 
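In `.gitlab-ci.yml` terms, the mechanic looks something like the hypothetical job below: the seeded project's test still expects the old message, so the pipeline stays red until the candidate brings the page and the test expectation back in sync.\n\n```yaml\n# Hypothetical test job in the seeded project; the real template ships its own test suite,\n# and the file path and expected string here are illustrative only.\ncheck-message:\n  stage: test\n  script:\n    # Fails once the page no longer matches the expected string.\n    - grep -q \"hello world\" public/index.html\n```\n\n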
### Powering project-seeder\n\nWe use variables from GitLab CI to configure the [project-seeder application](https://gitlab.com/gl-commit-example/project-seeder).\n\n![Screenshot of the project for the project-seeder application](https://about.gitlab.com/images/blogimages/fei_projseederapp.jpg){: .shadow.medium.center}\n\nInside the project-seeder application, which seeds the interview projects to job candidates.\n{: .note.text-center}\n\n\"I'm creating `new-project-example-two`, and I'm adding this bot user that I created and the expiration, so I can just easily run this pipeline and it'll seed this project,\" says Clement.\n\n![We use variables from GitLab CI to configure the project-seeder application](https://about.gitlab.com/images/blogimages/fei_variables.jpg){: .shadow.medium.center}\n\nThe next step is to run the setup pipeline, which will create the project, import the project, export the project, and share it with the job candidate.\n\n![A look inside the pipeline that will create the test project](https://about.gitlab.com/images/blogimages/fei_insidethepipeline.jpg){: .shadow.medium.center}\n\nA look inside the pipeline that will create the test project.\n{: .note.text-center}\n\nLooking inside example-one, we can see there is a project and a [broken MR](https://gitlab.com/gl-commit-example/interview-projects/example-1/-/merge_requests/1).\n\n\"And an example for a candidate – they would probably look at the CI and see, 'Oh there's a failing test. Let's see what that's about. Oh, it looks like it's checking for \"hello world\". So since we changed the message earlier, we can just change this and get this test passing and then pass this interview,'\" says Clement.\n\n## Why this new model for technical interviews is better\n\nThe new model surpasses the old model because we created realistic scenarios that reflect what it's like to actually work for GitLab, and we established a more consistent method of measurement.\n\n\"So we're able to get better candidates overall. Candidates that pass through this technical interview, we're sure that they're going to be successful at GitLab,\" says Clement.\n\nBy designing our technical interviews this way, we can ensure that the interview project matches our actual product architecture at GitLab, which in this case is Ruby on Rails and Vue.js.\n\nWe also struggled in the past with finding a good way to check that the candidate knows how to use Git and can navigate pipelines and testing. By using GitLab for interviews, we're able to confirm a candidate's competency with Git implicitly by evaluating their performance on the technical interview.\n\nWe wanted to mirror the actual experience of troubleshooting a broken MR while working at GitLab, so we allow our candidates to use the internet during their technical interview. This allows the evaluator to see how the candidate solves problems and to gauge their resourcefulness.\n\n\"If you're already using GitLab for your tooling, you're just exposing them to what it's like to work at GitLab; it's a more accurate representation,\" says Clement. 
\"And you can also make sure you're measuring testing proficiency and you make sure they understand how that works before they join your company.\"\n\n## Four key takeaways from our technical interview update\n\nWhether or not a company uses GitLab, there are a few key lessons that we learned by iterating on how we conduct technical interviews for engineers.\n\n1. **Make technical interviews as much like real work as possible**: Nine times out of ten, an engineering manager isn't going to sit back and watch an engineer break a sweat in a live coding exercise, any more than they will watch on as an engineer builds in UI. Create realistic scenarios based on the actual work and evaluate based on the candidate's performance.\n\n2. **Make any technical interview process \"open-book\"**: Engineering doesn't involve much rote memorization. Instead, allow the engineering candidate to use the internet (and in our case, the [GitLab Handbook](/handbook/)) to look up their questions. It's better to see how a candidate applies their knowledge and troubleshoots the inevitable problems that may arise. This change will likely improve your candidate experience too.\n\n3. **Standardize your rubric**: However the technical interview is done, make sure that the rubric is as objective as possible and that the candidate is evaluated based on various criteria, not on their familiarity with a particular technology. A strong rubric means a stronger, more valid method for evaluating candidate performance.\n\n4. **Create an inclusive process**: Think critically about how the technical interviewing process and evaluation is structured so a diverse group of candidates can be recruited and evaluated based on their merits. When in doubt, ask a diversity, inclusion and belonging expert or turn to your human resources team for advice. Still coming up empty? Hire a diversity consultant; it will be worth it.\n\n**Interviewing at GitLab?** We encourage you to use the resources GitLab creates during your technical interview. 
We don't publish our evaluation criteria publicly, but we do have the [Periscope dashboard](/handbook/engineering/frontend/interview-metrics/), which can provide some insight.\n\nWatch Clement's talk from [GitLab Commit San Francisco](https://www.youtube.com/watch?v=jSbCt8b_4ug) to learn more about how we used GitLab to power our technical interviewing process.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/jSbCt8b_4ug\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[1979,915],{"slug":5474,"featured":6,"template":678},"the-trouble-with-technical-interviews","content:en-us:blog:the-trouble-with-technical-interviews.yml","The Trouble With Technical Interviews","en-us/blog/the-trouble-with-technical-interviews.yml","en-us/blog/the-trouble-with-technical-interviews",{"_path":5480,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5481,"content":5486,"config":5491,"_id":5493,"_type":16,"title":5494,"_source":17,"_file":5495,"_stem":5496,"_extension":20},"/en-us/blog/gitlab-ci-cd-with-firebase",{"title":5482,"description":5483,"ogTitle":5482,"ogDescription":5483,"noIndex":6,"ogImage":4861,"ogUrl":5484,"ogSiteName":692,"ogType":693,"canonicalUrls":5484,"schema":5485},"How to leverage GitLab CI/CD for Google Firebase","Firebase is a powerful backend-as-a-service tool and when combined with GitLab it can be easy to enable continuous deployment of database, serverless and apps.","https://about.gitlab.com/blog/gitlab-ci-cd-with-firebase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage GitLab CI/CD for Google Firebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-03-16\",\n      }",{"title":5482,"description":5483,"authors":5487,"heroImage":4861,"date":5488,"body":5489,"category":14,"tags":5490},[2558],"2020-03-16","\n\nBuilding mobile apps can be painful - especially when it comes to finding a way to provide all the tooling needed to make the application feasible without becoming an expert in many different disciplines. [Firebase from Google](https://firebase.google.com/) aims to take away that burden by providing an app deployment platform and a BaaS, or Backend-as-a-Service. While the offerings can vary greatly, most BaaS providers include a database, object storage, push notifications, and some sort of hosting package. Firebase goes beyond this and provides built-in user authentication as well as [serverless](/topics/serverless/) functions, telemetry, and Google tools for growth.\n\nThose tools are very appealing to mobile and even web-app developers, and Firebase has been successful in that market, with customers including The New York Times, Lyft, and Duolingo, just to name a few. But even with all of the fantastic BaaS tools Firebase brings to bear on a project, it is critical to have source code management and [CI/CD tools](/topics/ci-cd/) to match. Because Firebase configuration for important settings such as database security, serverless functions, and hosting can all be stored “as code” inside your application’s repository, GitLab paired with Firebase makes for a powerful duo.\n\n## Our app\n\nOur application will be a relatively simple link shortener for use with the domain [labwork.dev](https://labwork.dev). 
In order to build a link shortener, we’ll need the ability to log users in, a database for storing the links, and a way to redirect visitors who follow the short links to the full URLs. Firebase comes with these items packaged together - which should make it relatively painless to stand up (famous last words, right?).\n\nI plan on covering the application in more detail in the future, but if you want to jump to the end, you can find the [completed project here](https://gitlab.com/brendan-demo/labwork/homepage/). For now, I wanted to at least introduce the architecture plan. I’ll use [Vue.js](https://vuejs.org) for the frontend: a web application that lets users log in using Firebase Authentication. Once logged in, users will have access to a form that allows them to create new short URLs. That form will call a Firebase Function that checks to see if the requested shortcode already exists (or creates a random hash if none is specified). If the shortcode is unique, the function adds the shortcode and the long URL to the `urls` collection in Firestore and returns okay.\n\nOnce the shortcode is in the database, I’ll use another cloud function to retrieve the long URL associated with it. Firebase Hosting has a great feature that allows you to redirect traffic that matches a pattern to a specified function, and I’ll use this so that anything that comes to `/go/{shortcode}` gets magically redirected to the correct long URL.\n\n![Basic Architecture Diagram](https://about.gitlab.com/images/blogimages/firebase_01.png){: .shadow.large.center}\n\n## Add Firebase to the project\n\nOnce we have finalized this architecture, built the skeleton of the project, and are ready to start deploying and testing, it’s time to add Firebase to our project. Firebase provides a [very helpful CLI tool](https://github.com/firebase/firebase-tools) for getting started here, and we’ll use that to begin.\n\nThe first command `firebase init` starts the project initialization process.\n\n![Output of firebase init command](https://about.gitlab.com/images/blogimages/firebase_02.png){: .shadow.large.center}\n\nFrom there, you can select which services you want to use with this project. You’ll also be able to decide whether to create a new Firebase project or use one you previously created in the [Firebase console](https://console.firebase.google.com/). You can also select where to store the configuration files. I’ll add a folder called `firebase-config` to store all of these files. Now you are able to source-control all changes to your Firebase architecture - from indexes to security rules - in the same repository as your project.\n\n![Firebase config files](https://about.gitlab.com/images/blogimages/firebase_03.png){: .shadow.large.center}\n\nYou can see all of the changes required to add Firebase to the project [in this merge request](https://gitlab.com/brendan-demo/labwork/homepage/-/merge_requests/1).\n\n## Deploy project to Firebase\n\nNow that Firebase is installed in our project folder and configured, we’re ready to deploy for the first time. In order to deploy the Vue.js portion of the project, we first need to build it to production HTML, CSS, and JavaScript. So before deployment, run the `yarn build` command. 
This will output the build to the `dist` folder by default, and I’ve configured Firebase to recognize that directory as the hosting directory in `firebase.json`.\n\n![Firebase.json example](https://about.gitlab.com/images/blogimages/firebase_04.png){: .shadow.large.center}\n\nOnce the project is built, running a simple `firebase deploy` will deploy ALL of the features of the project to Firebase: the security rules and indexes for Firestore, the Firebase Functions, and the Vue.js project to Firebase Hosting.\n\nIf desired, we can also choose to deploy just a particular part of the project with the `--only` flag. For example, to only deploy a new version of the functions, we can run:\n\n`firebase deploy --only functions`\n\nThis is a feature that we’ll combine with GitLab CI/CD in the next step to make our deployments as efficient as possible.\n\n## Automate deployments with GitLab CI/CD\n\nNow that we have the project deploying, we can automate that deploy process so that we don’t have to be at our computer authenticated to Firebase in order to deploy new changes. The steps to automate the deploy are relatively painless and include: (1) acquiring a Firebase API key to use during deployment, (2) setting up the `.gitlab-ci.yml` file to install the Firebase CLI before running any other steps, and (3) issuing the deployment commands for each part of the infrastructure, depending on what changed in a particular commit to the main branch.\n\nFirst, we need an API key so that GitLab CI/CD can authenticate to Firebase and perform the deploy. To get the API key, we can run `firebase login:ci` from the same place we were deploying the application previously. This will provide a key that looks something like ``, which we’ll add to GitLab.\n\nWhen you enter `firebase login:ci`, open the URL provided in your browser. That will open a Google authentication page; then log in with your Google account and click `Allow`. Then return to the terminal and you’ll see the authentication code.\n\n![Output of firebase login:ci command](https://about.gitlab.com/images/blogimages/firebase_05.png){: .shadow.large.center}\n\nOnce you’ve successfully authenticated and obtained the token, go to your project on GitLab and navigate to Settings -> CI/CD -> Variables. Here’s where we’ll add the token as an environment variable to be used in our deployment jobs. The key is `FIREBASE_TOKEN`, and the value is the token that was printed to your terminal. I’ve made mine both a [protected](https://docs.gitlab.com/ee/ci/variables/#protected-environment-variables) and [masked](https://docs.gitlab.com/ee/ci/variables/#masked-variables) variable. That means the variable will only be exposed to protected branches, and if it’s accidentally echoed to the job output, GitLab will mask it to keep it from leaking.\n\n![Variable configuration screen in GitLab](https://about.gitlab.com/images/blogimages/firebase_06.png){: .shadow.large.center}\n\nNow we can start on the configuration for our `.gitlab-ci.yml`. At the top of the file I’m going to set the default image to be the current node alpine image from Docker Hub:\n\n```yaml\nimage: node:12.13.0-alpine\n```\n\nNext, I’ll create a `before_script` that will install the Firebase CLI before running any jobs in the file. 
In the future, I could bundle that CLI into my own custom Docker image to avoid doing this every time, but for now I’ll go with the boring solution.\n\n```yaml\nbefore_script:\n  - npm i -g firebase-tools\n```\n\nFor the build steps, I want to create a separate job for each part of the infrastructure: Firestore, Functions, and the Vue app on Firebase Hosting. To do this, I’m going to use the [`only:`](https://docs.gitlab.com/ee/ci/yaml/#only--except) feature to deploy only the part of the infrastructure impacted by changes that have been merged to master. For example, we’ll only deploy the Firebase Functions when something changes in the `/functions` directory on the `master` branch:\n\n```yaml\ndeploy-functions:\n  stage: deploy\n  script:\n    - cd functions\n    - npm install\n    - cd ..\n    - firebase deploy --only functions --token $FIREBASE_TOKEN\n  only:\n    refs:\n      - master\n    changes:\n      - functions/**/*\n```\n\nWe’ll repeat this same pattern for both Firestore and the Hosting project, adding the `yarn build` step before deploying hosting each time. Once that’s completed, every time a merge request is accepted, GitLab CI/CD will automatically deploy the changes into our live production application. You can view the [completed `.gitlab-ci.yml` here](https://gitlab.com/brendan-demo/labwork/homepage/-/blob/master/.gitlab-ci.yml), or check out the link shortener for yourself (and try to [Rick Roll](https://labwork.dev/go/30201a) your friends at [labwork.dev](https://labwork.dev)).\n",[110,873,232],{"slug":5492,"featured":6,"template":678},"gitlab-ci-cd-with-firebase","content:en-us:blog:gitlab-ci-cd-with-firebase.yml","Gitlab Ci Cd With Firebase","en-us/blog/gitlab-ci-cd-with-firebase.yml","en-us/blog/gitlab-ci-cd-with-firebase",{"_path":5498,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5499,"content":5504,"config":5509,"_id":5511,"_type":16,"title":5512,"_source":17,"_file":5513,"_stem":5514,"_extension":20},"/en-us/blog/gitlab-eks-integration-how-to",{"title":5500,"description":5501,"ogTitle":5500,"ogDescription":5501,"noIndex":6,"ogImage":5327,"ogUrl":5502,"ogSiteName":692,"ogType":693,"canonicalUrls":5502,"schema":5503},"How to create a Kubernetes cluster on Amazon EKS in GitLab","A Kubernetes tutorial: Create clusters in a few clicks with GitLab and Amazon EKS.","https://about.gitlab.com/blog/gitlab-eks-integration-how-to","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create a Kubernetes cluster on Amazon EKS in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2020-03-09\",\n      }",{"title":5500,"description":5501,"authors":5505,"heroImage":5327,"date":5506,"body":5507,"category":14,"tags":5508},[1161],"2020-03-09","Kubernetes has created a whole new world for running infrastructure at scale. With the right setup, an application can go from serving a few users to millions effortlessly. Setting up Kubernetes can be taxing and can require a lot of expertise to put all the pieces together. You’ll need to set up virtual or bare metal machines to use as nodes and manage SSL certificates, networking, load balancers, and many other moving parts.\n\nThe introduction of Amazon Elastic Kubernetes Service (EKS) was widely applauded because it abstracts away these complexities in an environment most organizations are already familiar with, on a provider they already trust. 
Amazon EKS makes creating and managing Kubernetes clusters easier, with more granular controls around security and straightforward policies for how resources are used.\n\nGitLab strives to increase developer productivity by automating repetitive tasks and allowing developers to focus on business logic. We recently introduced support for auto-creating Kubernetes clusters on Amazon EKS. With the right permissions and a few clicks, you’ll have a fully functional Kubernetes cluster on Amazon EKS. It doesn’t stop there, however – GitLab also gives you the power to achieve the following use cases and more:\n\n* [Highly scalable CI/CD system using GitLab Runner](https://docs.gitlab.com/runner/): There are times, like holidays, when few or no code changes are pushed to production, so why keep resources tied down? With the Amazon EKS integration with GitLab, you can install GitLab Runner with just a click, and your CI/CD will run effortlessly without you worrying about running out of resources.\n* Shared Cluster: Maintaining multiple Kubernetes clusters can be a pain and capital-intensive. With Amazon EKS, GitLab allows you to set up a cluster at the [Instance](https://docs.gitlab.com/ee/user/instance/clusters/index.html), [Group](https://docs.gitlab.com/ee/user/group/clusters/index.html), and [Project](https://docs.gitlab.com/ee/user/project/clusters/) levels. Kubernetes Namespaces are created for each GitLab project when Amazon EKS is integrated at the Instance and Project levels, providing isolation and ensuring security.\n* [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/index.html): Reviewing changes to code or design can be tricky: you’ll need to check out your branch and run the code in a test environment. GitLab integrated with Amazon EKS deploys your app with new changes to a dynamic environment, and all you need to do is click a “View App” button to review the changes.\n* [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html) takes DevOps to a whole new level. AutoDevOps detects, builds, tests, deploys, and monitors your applications, leveraging the Amazon EKS integration. All you have to do is push your code and the magic happens. In this tutorial, we will use AutoDevOps to deploy a sample application to the Amazon EKS cluster we create.\n\nTo show you how easy it is to create an Amazon EKS cluster from GitLab, the rest of this tutorial will walk you through the steps of the integration, starting with a one-time setup of necessary resources on AWS.\n\n## One-time setup on AWS to access resources\n\nFirst, we need to create a “provision” role and a “service” role on AWS to grant GitLab access to your AWS resources and set up the necessary permissions to create and manage EKS clusters. You only need to perform these steps once; you can reuse the roles anytime you want to perform another integration or create more clusters.\n\n### Step 1 - Create Provision Role\n\nTo grant GitLab access to your AWS resources, a “provision role” is required. Let’s create one:\n\n1. Access the GitLab Kubernetes integration page by clicking the “Kubernetes” menu for groups (or the Operations > Kubernetes menu for projects), then click the “Add Kubernetes Cluster” button.\n2. Select “Amazon EKS” in the options provided under the “Create new cluster on EKS” tab.\n3. You are provided with an Account ID and an External ID to use for authentication. 
Make note of these values to be used in a later step.\n\n    ![Gitlab EKS Integration Page](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/gitlab_eks_integration_page.png)\n\n4. Open the IAM Management Console in another tab and click on “Create Role”\n5. Click on the “Another AWS account” tab and provide the Account ID and External ID obtained from GitLab, then click Next to set permissions as shown below:\n\n    ![AWS Provision Role](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/provision_role.png)\n\n6. On the permissions page, click on “Create policy.” This will open a new tab where you can set either of the permissions below using JSON:\n\n    ```json\n    {\n        \"Version\": \"2012-10-17\",\n        \"Statement\": [\n            {\n                \"Effect\": \"Allow\",\n                \"Action\": [\n                    \"autoscaling:*\",\n                    \"cloudformation:*\",\n                    \"ec2:*\",\n                    \"eks:*\",\n                    \"iam:*\",\n                    \"ssm:*\"\n                ],\n                \"Resource\": \"*\"\n            }\n        ]\n    }\n    ```\n\n    This gives GitLab full access to create and manage resources, as seen in the image below:\n\n    ![AWS Role Policy](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/create_role_policy.png)\n\n    If you prefer more limited permissions, you can use the JSON snippet below to give GitLab the ability to create resources but not delete them. The drawback here is that if an error is encountered during the creation process, changes will not be rolled back, and you must remove resources manually. You can do this by deleting the relevant CloudFormation stack.\n\n    ```json\n    {\n        \"Version\": \"2012-10-17\",\n        \"Statement\": [\n            {\n                \"Effect\": \"Allow\",\n                \"Action\": [\n                    \"autoscaling:CreateAutoScalingGroup\",\n                    \"autoscaling:DescribeAutoScalingGroups\",\n                    \"autoscaling:DescribeScalingActivities\",\n                    \"autoscaling:UpdateAutoScalingGroup\",\n                    \"autoscaling:CreateLaunchConfiguration\",\n                    \"autoscaling:DescribeLaunchConfigurations\",\n                    \"cloudformation:CreateStack\",\n                    \"cloudformation:DescribeStacks\",\n                    \"ec2:AuthorizeSecurityGroupEgress\",\n                    \"ec2:AuthorizeSecurityGroupIngress\",\n                    \"ec2:RevokeSecurityGroupEgress\",\n                    \"ec2:RevokeSecurityGroupIngress\",\n                    \"ec2:CreateSecurityGroup\",\n                    \"ec2:createTags\",\n                    \"ec2:DescribeImages\",\n                    \"ec2:DescribeKeyPairs\",\n                    \"ec2:DescribeRegions\",\n                    \"ec2:DescribeSecurityGroups\",\n                    \"ec2:DescribeSubnets\",\n                    \"ec2:DescribeVpcs\",\n                    \"eks:CreateCluster\",\n                    \"eks:DescribeCluster\",\n                    \"iam:AddRoleToInstanceProfile\",\n                    \"iam:AttachRolePolicy\",\n                    \"iam:CreateRole\",\n                    \"iam:CreateInstanceProfile\",\n                    \"iam:CreateServiceLinkedRole\",\n                    \"iam:GetRole\",\n                    \"iam:ListRoles\",\n                    \"iam:PassRole\",\n                    \"ssm:GetParameters\"\n                ],\n                \"Resource\": \"*\"\n    
        }\n        ]\n    }\n    ```\n\n    The image below visualizes what permissions are granted:\n\n    ![Limited Role Policy](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/limited_role_policy.png)\n\n7. Once the policy is created, return to the “Create Role” browser tab and refresh to see the policy we created listed. Select the policy and click “Next.”\n8. In the Tags section, we don’t need to set any tags unless your organization requires them. Let’s proceed to Review.\n9. Specify a name for your new role. You will see the policy we created listed under Policies; click “Create Role” to complete the process.\n10. Click on the new role you created in the list of roles to view its details. You may have to search for it if it’s not listed in the first view. Copy the Role ARN provided – we will need it on the GitLab Kubernetes integration page.\n\n### Step 2 - Create Service Role\n\nThe Service Role is required to allow Amazon EKS and the Kubernetes control plane to manage AWS resources on your behalf.\n\n1. In the IAM Management Console, click on “Create Role” and select the “AWS service” tab.\n2. Select EKS in the list of services and use cases, as shown below, then click Next.\n\n    ![Service Role](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/service_role.png)\n\n3. You will notice the “AmazonEKSClusterPolicy” and “AmazonEKSServicePolicy” permissions are selected; these are all we need. Click through the Tags step, adding tags if necessary, then click Next to get to the Review step. Click “Create Role” to complete the process.\n\n    ![Role Summary](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/role_summary.png)\n\n## GitLab EKS Integration\n\nThis is the easy part! As mentioned earlier, you only need to create the Provision and Service roles once if you don’t already have them in your organization’s AWS setup. You can reuse the roles for other integrations or cluster creations.\n\n1. Return to the GitLab Kubernetes Integration page, provide the Role ARN of the Provision Role we created earlier, and click “Authenticate with AWS.”\n\n    ![Gitlab EKS Integration Page](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/gitlab_eks_integration_page.png)\n\n2. Once authenticated, you’ll see a page where you can set the parameters needed to create your cluster, as shown in the image below. Click “Create Kubernetes Cluster” to let GitLab do its magic!\n\n    The parameters you’ll need to provide are:\n    * **Kubernetes cluster name** - The name you wish to give the cluster.\n    * **Environment scope** - The [GitLab environment](https://docs.gitlab.com/ee/user/project/clusters/index.html#setting-the-environment-scope) associated with this cluster; `*` denotes the cluster will be used for deployments to all environments.\n    * **Kubernetes version** - The Kubernetes version to use. 
Currently, the only version supported is 1.14.\n    * **Role name** - The service role we created earlier.\n    * **Region** - The [AWS region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) in which the cluster will be created.\n    * **Key pair name** - Select the [key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) that you can use to connect to your worker nodes if required.\n    * **VPC** - Select a [VPC](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) to use for your EKS Cluster resources.\n    * **Subnets** - Choose the [subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in your VPC where your worker nodes will run.\n    * **Security group** - Choose the [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) to apply to the EKS-managed Elastic Network Interfaces that are created in your worker node subnets. AWS provides a default group, which can be used for the purpose of this guide. However, you are advised to set up the right rules for your resources.\n    * **Instance type** - The AWS [instance type](https://aws.amazon.com/ec2/instance-types/) of your worker nodes.\n    * **Node count** - The number of worker nodes.\n    * **GitLab-managed cluster** - Leave this checked if you want [GitLab to manage namespaces and service accounts](https://docs.gitlab.com/ee/user/project/clusters/index.html#gitlab-managed-clusters) for this cluster.\n\n    ![Gitlab EKS Integration Page](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/gitlab_eks_integration_post_auth.png)\n\n3. The cluster creation process will take approximately 10 minutes. Once done, you can proceed to install some predefined applications. At the very least, you need to install the following:\n    - **Helm Tiller**: This is required to install the other applications.\n    - **Ingress**: This provides SSL termination, load balancing, and name-based virtual hosting for your applications. It acts as a web proxy for your application, which is useful when using AutoDevOps or deploying your own apps.\n    - **Cert Manager**: This is a native Kubernetes certificate management controller, which helps issue certificates using Let’s Encrypt. You don’t need this if you want to use a custom certificate issuer.\n    - **Prometheus**: GitLab uses the Prometheus integration for automatic monitoring of your applications to collect metrics from Kubernetes containers, allowing you to understand what is going on from within the GitLab UI.\n\n    ![Gitlab EKS Integration Page](https://about.gitlab.com/images/blogimages/gitlab-eks-integration/gitlab_eks_integration_post_cluster.png)\n\n4. To make use of the Auto Review Apps and Auto Deploy stages of [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/quick_start_guide.html), you will need to specify a base domain with a wildcard DNS entry pointing to the Ingress endpoint generated when you install Ingress from the list of predefined apps.\n\n## Summary\n\nIn this tutorial, we looked at how GitLab integrates with Amazon EKS, allowing Kubernetes clusters to be created easily from the GitLab UI after setting the right permissions. As we’ve seen, developer productivity is greatly improved by no longer having to manually set up clusters. Also, the same cluster can be used for multiple projects when Amazon EKS is integrated with GitLab at the Group and Instance levels, thus making onboarding new projects a breeze. 
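For example, once the cluster, Ingress, and base domain are in place, a project can opt in to AutoDevOps with a single include in its `.gitlab-ci.yml` (a minimal sketch; AutoDevOps can also be enabled from the project’s CI/CD settings without any file at all):\n\n```yaml\n# Opt in to GitLab's built-in Auto DevOps template, which supplies the\n# build, test, review, and deploy jobs for the connected cluster.\ninclude:\n  - template: Auto-DevOps.gitlab-ci.yml\n```\n\n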
After integration, the possibilities of what developers can achieve are enormous.\n\nIn the next part of this tutorial, we will look at how to deploy your applications on an Amazon EKS cluster using AutoDevOps.\n",[1002,749,2932],{"slug":5510,"featured":6,"template":678},"gitlab-eks-integration-how-to","content:en-us:blog:gitlab-eks-integration-how-to.yml","Gitlab Eks Integration How To","en-us/blog/gitlab-eks-integration-how-to.yml","en-us/blog/gitlab-eks-integration-how-to",{"_path":5516,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5517,"content":5523,"config":5528,"_id":5530,"_type":16,"title":5531,"_source":17,"_file":5532,"_stem":5533,"_extension":20},"/en-us/blog/unifylogsmetrics",{"title":5518,"description":5519,"ogTitle":5518,"ogDescription":5519,"noIndex":6,"ogImage":5520,"ogUrl":5521,"ogSiteName":692,"ogType":693,"canonicalUrls":5521,"schema":5522},"How to integrate operation logs and metrics in GitLab","We've added Elasticsearch to our monitoring solution so you can see all your logs and metrics in one view. Here's a first look at this new feature, released in GitLab 12.8.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666923/Blog/Hero%20Images/logs.png","https://about.gitlab.com/blog/unifylogsmetrics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to integrate operation logs and metrics in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2020-03-03\",\n      }",{"title":5518,"description":5519,"authors":5524,"heroImage":5520,"date":5525,"body":5526,"category":14,"tags":5527},[1020],"2020-03-03","\nLogging is one of the most powerful tools we have when trying to understand an application problem. It is a common practice – when things go wrong in production, one of the first requests is often, \"Please send me the logs!\" Raw logs contain useful information that can help pinpoint the root cause(s) of problems.\n\nBut using raw logs isn’t always a straightforward process. This is especially true in a modern, distributed, and often ephemeral architecture. Ideally, logs should be available across the entire application, be searchable, and offer at least some access to past history. Historically, aggregated logging solutions, if they existed, were only piecemeal. This forced developers to spend time and energy tracking down important log data, which ultimately resulted in logs being far less useful than they could be.\n\nWith the [12.8 release](/releases/2020/02/22/gitlab-12-8-released/), to ease navigating through logs, we added the [Elastic Stack](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html) as our log aggregation tool and the [Log Explorer](/releases/2020/02/22/gitlab-12-8-released/#explore-aggregated-logs) so you can interact with all your logs in one place.\n\nBut before we look at the logging capabilities, let’s take a step back and look at the big picture.\n\n## Why monitoring matters\n\nAt GitLab, we aim to provide users with a complete [DevSecOps platform](/solutions/security-compliance/), delivered as a single application. To do so, we have divided the DevSecOps lifecycle into [ten different stages](/stages-devops-lifecycle/). The final Ops stage of the [DevOps](/topics/devops/) loop, [Monitor](/direction/monitor/), should occur right after the production environment is configured and the application deployed. 
\n\nIn fact, it’s commonly believed in the DevOps world that no developer should ship code into production without monitoring, as it will help ensure an application behaves as expected. If something isn’t right, you will be alerted (hopefully before your users start to complain). If you are thinking about ignoring monitoring, always remember _customers_ are the most expensive monitoring solution you can have.\n\n### Chasing Observability\n\nObservability is the ability to infer internal states of a system based on the system’s external outputs. Monitoring, on the other hand, is the activity of observing the state of a system over time. To achieve observability, your system’s telemetry – metrics, traces, and logs – should all be available to enable proactive introspection and greater operational visibility. We believe that DevOps practitioners should have observability as a goal.\n\nGitLab’s vision for the Monitor category is to build a consolidated and integrated observability tool which will, over time, displace today's front-runner in modern observability. In pursuit of this vision and to focus our efforts, we are building our solutions cloud native first, selecting open source products that are cloud native compatible. And, in fact, as part of GitLab’s [New Year’s gift for 2020](/blog/observability/) we're moving a big portion of the observability features – custom metrics, logging, tracing and alerting – from our proprietary codebase to the open source codebase this year.\n\n### Metrics & Traces\n\nToday, if you use GitLab to deploy your application into a Kubernetes cluster, with a push of a button you can deploy monitoring (via a Prometheus instance) into that cluster. [Prometheus](https://prometheus.io/) will automatically start collecting key metrics from your deployed application (such as latency, error rate, and throughput) and send them directly to the GitLab UI. In addition to the out-of-the-box metrics and dashboard, it is possible to customize Prometheus directly from the GitLab UI (using PromQL) to collect any metric you desire and present it on a dashboard. You can set up a threshold, create an alert on it, and open an issue as a part of an incident management solution, all without leaving the GitLab UI.
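\n\nTo give a flavor of that customization, here is a minimal sketch of a dashboard definition using the `.gitlab/dashboards` mechanism; the panel and the PromQL query are placeholders rather than anything we ship:\n\n```yaml\ndashboard: 'HTTP overview'\npanel_groups:\n  - group: 'Traffic'\n    panels:\n      - title: 'Requests per second'\n        type: 'area-chart'\n        y_label: 'req/s'\n        metrics:\n          # Any PromQL range query can back a panel; this one is illustrative\n          - id: http_requests_rate\n            query_range: 'rate(http_requests_total[5m])'\n            label: 'All requests'\n            unit: 'req/s'\n```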
\n\nAs a developer, when there is an issue, you want to drill down to the exact function or microservice that is causing the trouble. For that, GitLab uses [Jaeger](https://www.jaegertracing.io/), an end-to-end distributed tracing system for microservices-based distributed systems.\n\n## Get started with logs\n\nBefore the 12.8 release, existing Monitor stage users already had the ability to view pod logs directly from within the GitLab UI. However, this was done only through the available Kubernetes APIs, which limit you to a log-tailing experience on one specific pod at a time, albeit from any of your environments.\n\nWith the 12.8 release any user can deploy the Elastic Stack - a specific flavor of Elasticsearch alongside a component called [Filebeat](https://www.elastic.co/beats/filebeat) - to a Kubernetes cluster with the push of a button (similar to the way we deploy Prometheus). Once deployed, it automatically starts collecting all logs coming from the cluster and applications across the available environments (production, staging, testing, etc.), and they will be surfaced in the GitLab UI. In addition, users can navigate directly from a metric chart to the Log Explorer while preserving the context.\n\nThis is critical when triaging an incident or validating the status of your service. In the cloud-native world, aggregating logs for observability becomes critical as logs are distributed across multiple pods and services. Using our new solution, you get an aggregated view of all logs across multiple services and infrastructures, can go back in time, search through logs, and more.\n\n## What's next\n\nI hope you found this overview useful. To get started, download GitLab and read its documentation for more in-depth coverage of the functionality. One of the fastest ways to experience GitLab features is to use GitLab.com, the hosted version of GitLab.\n\nIf you would like to get in touch with the Monitoring team, please comment and contribute to the linked [issues](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Aapm&label_name[]=Category%3ALogging) and [epics](https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Aapm&label_name[]=Category%3ALogging) on this page. Sharing your feedback directly on GitLab.com is the best way to contribute to our strategy and vision.\n\nIf you're a GitLab user and have direct knowledge of your logging usage, we'd especially love to hear your use case(s).\n\nWatch my entire YouTube video on logging:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/hWclZHA7Dgw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[1002,703,749],{"slug":5529,"featured":6,"template":678},"unifylogsmetrics","content:en-us:blog:unifylogsmetrics.yml","Unifylogsmetrics","en-us/blog/unifylogsmetrics.yml","en-us/blog/unifylogsmetrics",{"_path":5535,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5536,"content":5542,"config":5548,"_id":5550,"_type":16,"title":5551,"_source":17,"_file":5552,"_stem":5553,"_extension":20},"/en-us/blog/protecting-manual-jobs",{"title":5537,"description":5538,"ogTitle":5537,"ogDescription":5538,"noIndex":6,"ogImage":5539,"ogUrl":5540,"ogSiteName":692,"ogType":693,"canonicalUrls":5540,"schema":5541},"How to limit access to manual pipeline gates and deployments using GitLab","Let's look at how to use protected environments to set up access controls for production deployments and manual gates.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681105/Blog/Hero%20Images/protect_manual_jobs.jpg","https://about.gitlab.com/blog/protecting-manual-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to limit access to manual pipeline gates and deployments using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thao Yeager\"}],\n        \"datePublished\": \"2020-02-20\",\n      }",{"title":5537,"description":5538,"authors":5543,"heroImage":5539,"date":5545,"body":5546,"category":14,"tags":5547},[5544],"Thao Yeager","2020-02-20","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2020-02-21.\n{: .alert .alert-info .note}\n\nIn our world of automation, why would anyone want to do something manually? Manual has become almost synonymous with inefficient. But, when it comes to CI/CD pipelines, a properly configured **manual** job can be a powerful way to control deployments and satisfy compliance requirements. Let’s take a look at how manual jobs can be defined to serve two important use cases: Controlling who can deploy, and setting up manual gates.
\n\n## Limit access to deploy to an environment\n\nDeploying to production is a mission-critical occurrence that should be protected. Projects with a Kubernetes cluster could benefit from moving to a continuous deployment (CD) model in which a [branch or merge request, once merged, is auto-deployed to production](https://docs.gitlab.com/ee/topics/autodevops/index.html#auto-deploy), and the absence of human intervention avoids mishaps. But for projects not yet configured for CD, let's consider this use case: Imagine a pipeline with a manual job to deploy to prod, which can be triggered by any user with access to push code. The risk of an unplanned, unintended production deployment is very real.\n\nFortunately, it’s possible to use [protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments/) to prevent just anyone from deploying to production. When [configuring a protected environment](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environments), you can define the roles, groups, or users to whom deploy access is granted. The protected environment can then be referenced in a manual deploy job, which limits who can run it. The configuration could look something like this:\n\n```yaml\ndeploy_prod:\n  stage: deploy\n  script:\n    - echo \"Deploy to production server\"\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  only:\n    - master\n```\n\nIn the example above, the keyword `environment` is used to reference a protected environment (as [configured in project settings](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environment)) with a list of users who can run the job, in this case deploy to the named environment. Users without access see a disabled **play** button and are unable to execute the job.\n\n## Add an approval step\n\nCompliance rules may specify that approval is required for certain activities in a workflow, even if they aren't technically a deployment step themselves. In this use case, an approval step can also be added in the pipeline that prompts an authorized user to take action to continue. This can be achieved by structuring your pipeline with an \"approve\" stage containing a special manual job – for example, the YAML to insert an approval stage before deployment could look like this:\n\n```yaml\nstages:\n  - build\n  - approve\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - echo Hello!\n\napprove:\n  stage: approve\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  allow_failure: false\n  only:\n    - master\n\ndeploy:\n  stage: deploy\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  only:\n    - master\n```\n\nIn the YAML above, `allow_failure: false` [defines the manual job as \"blocking\"](https://docs.gitlab.com/ee/ci/yaml/#whenmanual), which will cause the pipeline to pause until an authorized user gives \"approval\" by clicking on the **play** button to resume. Only users granted access to the environment are able to perform this action. In this scenario, the pipeline from the example CI configuration above looks like this in the UI:
\n\n![Pipeline view of approval stage manual job](https://about.gitlab.com/images/blogimages/manual_job_approve_stage_ui.png){: .shadow}\n\n## Summary\n\nAs illustrated in the YAML examples and image above, manual jobs defined with protected environments and blocking attributes are effective tools for handling compliance needs as well as for ensuring there are proper controls over production deployments.\n\nTell us how using protected environments with manual jobs has secured your deployments, or whether blocking manual jobs helps you meet compliance and auditing requirements. [Create an issue in the GitLab project issue tracker](https://gitlab.com/gitlab-org/gitlab/issues/new) to share your feedback with us.\n\nCover image by [Diane Walton](https://unsplash.com/photos/BNnzmBmnPg4) on [Unsplash](https://unsplash.com)\n{: .note}\n",[110,2932,727,749,894],{"slug":5549,"featured":6,"template":678},"protecting-manual-jobs","content:en-us:blog:protecting-manual-jobs.yml","Protecting Manual Jobs","en-us/blog/protecting-manual-jobs.yml","en-us/blog/protecting-manual-jobs",{"_path":5555,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5556,"content":5562,"config":5568,"_id":5570,"_type":16,"title":5571,"_source":17,"_file":5572,"_stem":5573,"_extension":20},"/en-us/blog/how-were-building-up-performance-testing-of-gitlab",{"title":5557,"description":5558,"ogTitle":5557,"ogDescription":5558,"noIndex":6,"ogImage":5559,"ogUrl":5560,"ogSiteName":692,"ogType":693,"canonicalUrls":5560,"schema":5561},"How GitLab's QA Team Leverages Performance Testing Tools","We built our open source GitLab Performance tool to evaluate pain points and implement solutions on every GitLab environment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681087/Blog/Hero%20Images/performance-server-front.jpg","https://about.gitlab.com/blog/how-were-building-up-performance-testing-of-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How our QA team leverages GitLab’s performance testing tool (and you can too)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grant Young\"}],\n        \"datePublished\": \"2020-02-18\",\n      }",{"title":5563,"description":5558,"authors":5564,"heroImage":5559,"date":5565,"body":5566,"category":14,"tags":5567},"How our QA team leverages GitLab’s performance testing tool (and you can too)",[911],"2020-02-18","\n\nWe’ve set up several initiatives aimed at testing and improving the performance of GitLab, which is why the Quality team built a new tool to test GitLab's performance.\n\nPerformance testing is an involved process and distinct from other testing disciplines. The strategies and tooling in this space are specialized and require dedicated resources to achieve results. When I joined the company and became the first member of this team, the task was to expand our nascent performance efforts to a much larger scale. For this, we needed to build out a new tool that we aptly named the [GitLab Performance tool](https://gitlab.com/gitlab-org/quality/performance) (GPT).\n\nWe're happy to announce the general release of [GPT](https://gitlab.com/gitlab-org/quality/performance/-/releases). In this blog post, we'll share how GPT is used to performance test GitLab, and how you can use it as well to test your own environments.
\n\nHowever, before we get into what the GPT is, we need to first touch on what we use it with.\n\n## Reference Architectures and test data\n\nIn our experience, the challenging part of performance testing isn’t the testing itself, but configuring the right environments and data to test against.\n\nAs such, one of the initiatives we’ve been driving is the design of several [GitLab Reference Architectures](https://docs.gitlab.com/ee/administration/reference_architectures/index.html#available-reference-architectures) that can handle large numbers of users. We wanted to create these architectures as a way to standardize our recommended configurations to ensure we were presenting customers with options for performant, scalable, and highly available GitLab setups.\n\nTo make these environments realistic test targets, we needed to add representative data to test against, e.g., large projects with commits and merge requests. As a first iteration, we started with our very own GitLab project.\n\nOnce we got our environments running and configured, we were ready to test them with the GPT.\n\n## What is the GitLab Performance tool (GPT)?\n\nThe GPT can be used to run numerous load tests to verify the performance of any GitLab environment. All that’s required is knowledge of what throughput the intended environment can handle (as requests per second) and ensuring that the environment has the necessary data prepared.\n\nThe GPT is built upon one of the leading tools in the industry, [k6](https://k6.io/). Here are some examples of what the GPT provides:\n\n* A broad test suite that comes out-of-the-box and covers various endpoints across the GitLab product, with the ability to add your own custom tests as desired. [See the latest out-of-the-box test details](https://gitlab.com/gitlab-org/quality/performance/-/wikis/current-test-details), with more being added frequently.\n* [Options](https://gitlab.com/gitlab-org/quality/performance/-/blob/master/docs/k6.md#options) for customizing test runs, such as specifying desired GitLab environment data or defining what throughput to use, with default examples given.\n* [Ability to run multiple tests sequentially as well as be selective about which are chosen](https://gitlab.com/gitlab-org/quality/performance/-/blob/master/docs/k6.md#running-the-tests-with-the-tool).\n* [Enhanced reporting and logging](https://gitlab.com/gitlab-org/quality/performance/-/blob/master/docs/k6.md#running-the-tests-with-the-tool).\n* [Built-in test success thresholds](https://gitlab.com/gitlab-org/quality/performance/-/blob/master/docs/k6.md#test-thresholds) based on [time to first byte](https://en.wikipedia.org/wiki/Time_to_first_byte), throughput achieved, and successful responses.\n\nThe talented team at [Load Impact](https://loadimpact.com/) created [k6](https://k6.io/), which is the core of the GPT. We realized quickly that we didn’t need to reinvent the wheel because k6 met most of our needs: It is written in Go, so it is very performant, and it is an open source solution. Thanks to the team for not only developing k6 but also for reaching out to collaborate soon after we started.\n\n## How we use GPT\n\nWe use the GPT in several automated [GitLab CI pipelines](/blog/guide-to-ci-cd-pipelines/) for quick feedback on how GitLab is performing. The CI pipelines typically run daily or weekly against our reference architecture environments, which themselves are running on the latest pre-release code. We review the test results as they come in and then investigate any failures. In line with our [Transparency value](https://handbook.gitlab.com/handbook/values/#transparency), we also publish all of the [latest results](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Benchmarks/Latest) for anyone to view on the [GPT wiki](https://gitlab.com/gitlab-org/quality/performance/-/wikis/home).
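\n\nAs a rough, hypothetical sketch of what one of those scheduled pipelines can look like (the image and the commands are illustrative placeholders, not the GPT's documented interface; see the GPT docs for real usage):\n\n```yaml\ngpt-nightly:\n  image: ruby:2.6\n  script:\n    # Fetch the tool; the actual invocation and options are described in the GPT documentation\n    - git clone https://gitlab.com/gitlab-org/quality/performance.git gpt\n    - cd gpt\n    - bundle install\n    - echo \"invoke the GPT against the target environment here\"\n  rules:\n    - if: '$CI_PIPELINE_SOURCE == \"schedule\"'\n```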
\n\nThe GPT is also used in a comparison test pipeline to see how GitLab’s performance changes in every release cycle. These results are important because they show the whole picture of our performance evolution. The [benchmark comparison results](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Benchmarks/GitLab-Versions) are also available on the [GPT wiki](https://gitlab.com/gitlab-org/quality/performance/-/wikis/home).\n\nBy using the GPT, we’ve been able to identify several performance pain points of GitLab and collaborate with our dev teams to prioritize improvements. The process has been fruitful so far, and we’re excited to already see improvements in the performance numbers with each release of GitLab. The 12.6 release, for example, showed [several notable improvements across the board](https://gitlab.com/gitlab-org/quality/performance/-/wikis/Benchmarks/GitLab-Versions#comparisions), one even as high as a 92% reduction! You can see the issues we've raised so far through this work over on our [issue tracker](https://gitlab.com/gitlab-org/gitlab/issues?scope=all&utf8=%E2%9C%93&state=all&label_name[]=Quality%3Aperformance-issues).\n\n## How you can use GPT\n\nWe decided early that we wanted to follow the same open source principles as our main product, so we built the GPT with all users in mind rather than making it a strictly internal tool. So not only do we let others use it, we encourage it! This is beneficial for us and customers, as we receive feedback from diverse viewpoints that we hadn’t considered. Some examples of this are [improving the recommended spec guidelines based on throughput](https://gitlab.com/gitlab-org/quality/performance/issues/172) or [making it easier for users who have private clouds to use the GPT offline](https://gitlab.com/gitlab-org/quality/performance/issues/106).\n\nIf you want to use the GPT for yourself, the best place to start is with its [documentation](https://gitlab.com/gitlab-org/quality/performance#documentation). As mentioned earlier, most of the effort to use the GPT is preparing the intended environment. The docs will take you through this along with how to use the tool itself.\n\n## The GPT in action\n\nFinally, after all this talk about the GPT, we should actually show it in action. Here's how it looks when running a load test for the [List Group Projects API](https://docs.gitlab.com/ee/api/groups.html#list-a-groups-projects) against our [10k Reference Architecture](https://docs.gitlab.com/ee/administration/reference_architectures/10k_users.html):\n\n[![asciicast](https://asciinema.org/a/O96Wc5fyxvLb1IDyviTwbujg8.svg)](https://asciinema.org/a/O96Wc5fyxvLb1IDyviTwbujg8?autoplay=1)\n\nRead the GPT [documentation](https://gitlab.com/gitlab-org/quality/performance/blob/master/docs/k6.md#test-output-and-results) for more details on output and results.\n\n## What’s next?\n\nOur aim is to make GitLab’s performance best in class. This is only the start of our performance testing journey with GPT, and we are excited about the additional ways we can continue to help improve the customer experience.
\n\n[Some examples of our plans for the next few releases](https://gitlab.com/gitlab-org/quality/performance/issues) include expanding test coverage to more of GitLab’s features and entry points (API, Web, Git) and expanding our work on the reference architectures, test data, and user behavior patterns to be as representative and realistic as possible.\n\nShare your feedback and/or suggestions on GPT here or on our [GPT project](https://gitlab.com/gitlab-org/quality/performance)! We welcome your ideas or contributions.\n\nCover image by [Taylor Vick](https://unsplash.com/@tvick?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/server?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[704,1328],{"slug":5569,"featured":6,"template":678},"how-were-building-up-performance-testing-of-gitlab","content:en-us:blog:how-were-building-up-performance-testing-of-gitlab.yml","How Were Building Up Performance Testing Of Gitlab","en-us/blog/how-were-building-up-performance-testing-of-gitlab.yml","en-us/blog/how-were-building-up-performance-testing-of-gitlab",{"_path":5575,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5576,"content":5582,"config":5587,"_id":5589,"_type":16,"title":5590,"_source":17,"_file":5591,"_stem":5592,"_extension":20},"/en-us/blog/all-aboard-merge-trains",{"title":5577,"description":5578,"ogTitle":5577,"ogDescription":5578,"noIndex":6,"ogImage":5579,"ogUrl":5580,"ogSiteName":692,"ogType":693,"canonicalUrls":5580,"schema":5581},"How starting merge trains improves efficiency for DevOps","No more queuing and waiting for pipeline results! Read how merge trains will speed up your deployments while making sure master stays green.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678419/Blog/Hero%20Images/merge_trains.jpg","https://about.gitlab.com/blog/all-aboard-merge-trains","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How starting merge trains improves efficiency for DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-01-30\",\n      }",{"title":5577,"description":5578,"authors":5583,"heroImage":5579,"date":5584,"body":5585,"category":14,"tags":5586},[4631],"2020-01-30","\nA large percentage of a developer's day is spent updating their branches and rebasing; they are essentially \"racing\" their teammates to get their merge requests merged. Keeping the master branch green is critical for [continuous delivery](/topics/continuous-delivery/). When the production build breaks, it means your new code isn't going live, which impacts users and revenue. The only way to be 100% sure the master branch stays green when new code merges is to run the pipeline using the latest version of the master branch. For teams that have a high volume of merges, this can be difficult or even impossible. In the time it takes the pipeline to complete one code change, other changes can get merged to master with the potential for conflict. The only way to mitigate this is to queue and sequence the changes so that once a production pipeline starts, other code doesn't get merged ahead of that change.
\n\n## What are merge trains and how do they help?\n\nMerge trains introduce a way to order the flow of changes into the target branch (usually master). When teams have a high number of changes landing in the target branch, it can happen that while the merged code for one change is being validated, another change is merged to master, invalidating the previous merged result.\n\nWith merge trains, each merge request joins the train as its last item, and the items are processed in order. However, instead of queuing and waiting, each item takes the completed state of the previous (pending) [merge ref](https://gitlab.com/gitlab-org/gitlab-foss/issues/47110) (the result of the merge), adds its own changes, and starts the pipeline immediately in parallel under the assumption that everything is going to pass.\n\nIf all pipelines in the merge train complete successfully, then no pipeline time is wasted on queuing or retrying. Pipelines invalidated through failures are immediately canceled, the MR causing the failure is removed, and the rest of the MRs in the train are requeued without the need for manual intervention.
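\n\nNote that merge trains build on merge request pipelines, so jobs need to opt in to running in that context. A bare-bones sketch of such a job, assuming pipelines for merged results are enabled in the project settings:\n\n```yaml\ntest:\n  stage: test\n  script:\n    # With merge trains enabled, this runs against the train's merged result\n    # rather than against the source branch alone\n    - echo \"Testing the merged result\"\n  only:\n    - merge_requests\n```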
\n\nAn example of a merge train:\n\n![Diagram of merge trains](https://about.gitlab.com/images/blogimages/merge_trains-1.png){: .shadow}\n\nMR1 and MR2 join a merge train. When MR3 attempts to join, the merge fails and it is removed from the merge train. MR4 restarts at the point that MR3 failed, and attempts to run without the contents of MR3.\nMR3 will remain open in a failed state, so that the author can rebase and fix the failure before attempting to merge again.\n\nHere is a demonstration video that explains the advantage of the merge train feature. In this video, we simulate the common problem in a workflow without merge trains, and then resolve the problem by enabling a merge train.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/D4qCqXgZkHQ\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## How the merge trains feature has evolved so far\n\nAfter releasing [merge trains](/releases/2019/06/22/gitlab-12-0-released/#sequential-merge-trains) in GitLab 12.0, we immediately started to use this feature internally, and collected a lot of valuable feedback which helped us to improve and enhance the feature.\n\nWe started by tuning the [merge train concurrency](https://gitlab.com/gitlab-org/gitlab/issues/31692). We understood that while merge trains are designed to improve efficiency by making sure that master stays green, they can also create an unwanted bottleneck that slows down productivity if your merge requests need to wait in a long queue in order to get merged.\n\nWe also noticed that many developers were \"skipping the line\" and merging their changes immediately because they did not understand the effect that merging immediately has on other users, so we added a [warning](https://gitlab.com/gitlab-org/gitlab/issues/12679) to clarify this common misunderstanding. We intentionally kept the option to \"merge immediately\" since we also understand the importance of an urgent merge request, such as a \"hot fix\" that must be able to skip to the front of the merge train.\n\nAnother improvement was the ability to [“squash & merge” as part of the merge train](https://gitlab.com/gitlab-org/gitlab/issues/13001) in order to maintain a clean commit history.\n\nHere is a demonstration video that explains how squash & merge works with merge trains.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/pA5SfHwlq0s\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## What's next\n\nWe plan to add more important features to merge trains. The first is that [merge trains should support fast-forward merge](https://gitlab.com/gitlab-org/gitlab/issues/35628). This could help solve a fundamental contention problem of fast-forward merges: The CI pipeline must be run every time the merge request is rebased, and the merge request must be rebased every time master changes – which is frequently! This problem significantly limits the frequency with which merge requests can be merged.\n\nThe second feature, [API support for merge trains](https://gitlab.com/gitlab-org/gitlab/issues/32665), will extend the ability to automate your workflows using merge trains.\n\nWe want to hear from you! Tell us how merge trains have improved your workflow, or give us more insight into how we can improve merge trains to work better for you. [Give us your feedback by commenting here](https://gitlab.com/groups/gitlab-org/-/epics/2408).\n\nCover image by [Vidar Nordli-Mathisen](https://images.unsplash.com/photo-1525349769815-0e6ba4e0bbdd?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1611&q=80) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[110,2932,727,749],{"slug":5588,"featured":6,"template":678},"all-aboard-merge-trains","content:en-us:blog:all-aboard-merge-trains.yml","All Aboard Merge Trains","en-us/blog/all-aboard-merge-trains.yml","en-us/blog/all-aboard-merge-trains",{"_path":5594,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5595,"content":5601,"config":5606,"_id":5608,"_type":16,"title":5609,"_source":17,"_file":5610,"_stem":5611,"_extension":20},"/en-us/blog/insights",{"title":5596,"description":5597,"ogTitle":5596,"ogDescription":5597,"noIndex":6,"ogImage":5598,"ogUrl":5599,"ogSiteName":692,"ogType":693,"canonicalUrls":5599,"schema":5600},"GitLab: New Tool to Visualize High-Level Project Trends","How our easy to configure Insights technology takes data from issues and merge requests to build visually appealing charts.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681053/Blog/Hero%20Images/birdseyeview.jpg","https://about.gitlab.com/blog/insights","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're dogfooding a tool to help visualize high-level trends in GitLab projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-01-30\",\n      }",{"title":5602,"description":5597,"authors":5603,"heroImage":5598,"date":5584,"body":5604,"category":14,"tags":5605},"We're dogfooding a tool to help visualize high-level trends in GitLab projects",[3676],"\n\nOur policy at GitLab is to [dogfood everything](/handbook/engineering/development/principles/#dogfooding) – meaning we aren't going to introduce a new product or feature to our [DevOps platform](/solutions/devops-platform/) before our engineering team tests it out. Sometimes though, the development process happens in reverse: The product and engineering teams need a specific tool or functionality to help us run GitLab better, and in building it discover a tool that can solve many different customer use cases.
\n\n[Insights](https://docs.gitlab.com/ee/user/project/insights/), which is available to [GitLab Ultimate](/pricing/ultimate/) users, is an example of such a tool. Insights is a flexible feature of GitLab that allows our users to visualize different trends in workflows, bugs, merge request (MR) throughput, and issue activity, based upon the underlying labeling system of a group. In this blog post, we'll go in-depth on how and why we built this tool, how we use the tool at GitLab, and explain how to configure Insights for your own projects.\n\n\n- [Why we built Insights](#why-we-built-insights)\n- [Labels power Insights](#why-label-hygiene-matters)\n- [How to configure Insights](#configuring-your-insights-dashboard)\n- [How GitLab uses Insights](#how-we-are-dogfooding-insights)\n- [Implementing Insights in your instance](#implementing-insights-for-your-team)\n\n[Kyle Wiebers](/company/team/#kwiebers), quality engineering manager on Engineering Productivity, gives an overview of how we use Insights at GitLab in the GitLab Unfiltered video embedded below. Watch the video and read the rest of the post to learn all about this exciting new tool we're dogfooding at GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/kKnQzS9qorc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Why we built Insights\n\nThe [Engineering Productivity team](/handbook/engineering/quality/#engineering-productivity) at GitLab first built Insights to provide an overview of trends in the issue tracker, but soon realized that this technology could be applied in different ways that were beneficial to our needs, and the needs of our users.\n\n\"The initial thing was we were interested in when the bugs were being raised: Were they being raised around release time or were they being raised in the middle of a phase?\" says [Mark Fletcher](/company/team/#markglenfletcher), backend engineer on Engineering Productivity. \"Because we did have bugs being created just after release, which led to regressions, which led to patch fixes. So we were just interested in exploring those kinds of trends.\"\n\nTo capture this trend data, the Quality Engineering team created the [quality dashboard](https://quality-dashboard.gitlap.com/groups/gitlab-org), which was essentially the first iteration of Insights for GitLab. While the quality dashboard showed trends in bugs being raised per release cycle, it also showed how much work was being accomplished over the same period.\n\n\"And that's where the scope really changed from looking at issues that are bugs to merge requests and being able to have generic rules based on labels that we can use to align with our workflow,\" says Kyle.\n\n## Why label hygiene matters\n\nThe Engineering Productivity team soon realized that a lot of the different trends they were aiming to capture with Insights were powered by [labels](https://docs.gitlab.com/ee/user/project/labels.html#overview). Labels allow a GitLab user to categorize epics, issues, and merge requests with descriptive titles such as \"bug\" or \"feature request\" and quickly filter based upon category. The label filtering system works inside the [issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&first_page_size=100) and throughout GitLab, and is a core part of the underlying configuration of Insights.
\n\nA good example of an Insights dashboard that is configured by labels and the metadata that underlies issues and merge requests (such as creation date) is the [MR throughputs dashboard](https://gitlab.com/groups/gitlab-org/-/insights/#/throughputs).\n\n![Merge request throughputs for group](https://about.gitlab.com/images/blogimages/merge_throughputs_group.png){: .shadow.medium.center}\nA screenshot of the chart for merge request throughputs at the group level.\n{: .note.text-center}\n\nThe MR throughputs dashboard captures how many MRs are completed during a given week or month to measure our organization's overall performance. It is part of our workflow to assign labels to MRs that help distinguish the type of MR being worked on: feature, bug, community contribution, security, or backstage. This dashboard is configured as a stacked bar chart, which makes it easy to visualize MR throughput by type so we can see the type of work being created over a fixed period of time. The chart is also divided into weekly or monthly views, which helps us see both short- and long-term trends.\n\n\"So, we can look at short-term trends and longer-term trends to see: Are we delivering more work? Are we hitting a bottleneck? Are we plateauing? And that allows us to dive a little bit deeper and take corrective action,\" says Kyle.\n\n### Labels help simplify the configuration of dashboards\n\nIf you look at the left-hand sidebar of the MR throughputs dashboard, you'll notice that the dashboard is configured at the GitLab-org group level. The group level of GitLab-org contains all of the projects within GitLab-org and therefore captures all of the MR throughput data across all projects.\n\nThe project level is a level below the group level and looks at a specific project contained within a larger group, such as the GitLab project in the GitLab-org group.\n\n![Merge request throughputs for project](https://about.gitlab.com/images/blogimages/mr_throughputs_product.png){: .shadow.medium.center}\nA screenshot of the chart for merge request throughputs at the project level.\n{: .note.text-center}\n\nAny Insights dashboard, including the MR throughputs dashboard, can be filtered at the group level or the project level, but the configuration remains the same regardless of how the dashboard is filtered.\n\n\"So everything that's contained within a group, and in our case, it would be the GitLab-org group, you can also have this on a project level,\" says Kyle. \"So if you want to look at Insights on a project, you can configure the same thing on a project. Just for our use case, it made sense to look at MR throughputs across multiple projects versus one specific project.\"\n\nBut in the end, it all comes back to labels. We don't have to configure the Insights dashboard differently for groups and projects because all of our labels at GitLab are set up at the group level and then propagate down to the project level.\n\nOne of the characteristics of Insights that makes it such a valuable feature is that the configuration is so flexible. While most customers will use the same labeling system across groups and projects as GitLab does, it is possible to configure the charts separately at the project and group level.
\n\n\"The scope [of Insights] changed from looking at issues that are bugs to merge requests and being able to have generic rules based on labels that we can use to align with our workflow,\" says Kyle. \"Then that flexibility allows any customers to leverage the same feature based on their own specific workflow or labeling practices.\"\n\nA user can use Insights on a group or project regardless of the underlying labeling system. They just need to configure the dashboard according to their workflow.\n\n## Configuring your Insights dashboard\n\nThere are numerous Insights dashboards that are available out of the box or that can be [easily configured](https://docs.gitlab.com/ee/user/project/insights/#configure-your-insights) based on a user's labeling workflow.\n\nAll of the Insights dashboards within GitLab are [driven by a YAML file](https://gitlab.com/gitlab-org/quality/insights-config/-/blob/master/.gitlab/insights.yml). The configuration for each chart includes three parameters: title, type, and query.\n\nThe query section defines the type of issues and/or merge requests from the issue tracker that will be included in the chart. The [parameters for which labels are contained in the chart](https://docs.gitlab.com/ee/user/project/insights/#queryfilter_labels) fall under the query section as well.\n\n\"The Insights configuration is actually stored in [one of your project's repositories]. So, it can be changed just like you do any of your code. It can be [version-controlled](/topics/version-control/) so you can see changes over time. That gives you a lot of value to just ensure that there's very clear traceability into why was this dashboard changed, and when was it changed,\" says Kyle.\n\nHere is the configuration that underlies the [MR throughputs dashboard](https://gitlab.com/groups/gitlab-org/-/insights/#/throughputs) we looked at extensively in the section above.\n\n```\nthroughputs:\n  title: Merge Request Throughputs (product only projects)\n  charts:\n    - title: Throughputs per Week\n      type: stacked-bar\n      query:\n        issuable_type: merge_request\n        issuable_state: merged\n        collection_labels:\n          - Community contribution\n          - security\n          - bug\n          - feature\n          - backstage\n        group_by: week\n        period_limit: 12\n    - title: Throughputs per Month\n      type: stacked-bar\n      query:\n        issuable_type: merge_request\n        issuable_state: merged\n        collection_labels:\n          - Community contribution\n          - security\n          - bug\n          - feature\n          - backstage\n        group_by: month\n        period_limit: 24\n```\n{: .language-yaml}\n\nExplore the [Insights YAML file for GitLab](https://gitlab.com/gitlab-org/gitlab-insights/blob/master/.gitlab/insights.yml) to see how we set up some of our other charts.\n\n## How we are dogfooding Insights\n\nInsights is most effective at monitoring high-level trends, as well as measuring performance against a specific measurable objective with the aim of taking corrective action. At GitLab, we've been using our Insights technology in different ways to visualize our overall performance or to answer specific questions.\n\nOur Support and Quality Engineering teams at GitLab currently use Insights, but in different ways. By dogfooding the technology here at GitLab, we've found numerous use cases for Insights that could be valuable to our customers.
\n\n### How our Support team uses Insights\n\nThe Support team uses Insights both as an out-of-the-box issue tracking dashboard and as a customized dashboard made possible using automation.\n\n#### Bugs SLO chart\n\nThe [Bugs SLO dashboard](https://gitlab.com/gitlab-org/gitlab/insights/#/bugsPastSLO) was created so the Support department and engineering leaders can identify bugs that have missed their SLO.\n\n![Support team Bugs SLO chart](https://about.gitlab.com/images/blogimages/bugs_slo.png){: .shadow.medium.center}\nA chart specially configured for the Support team to show how many bugs missed the SLO each month.\n{: .note.text-center}\n\nThe Bugs SLO chart is configured in the GitLab-org group but lives in the GitLab project. The chart pulls open issues pertaining to bugs and customer bugs that are labeled `missed-SLO`, and groups them by month. We also have a [labeling system for categorizing based on priority](https://docs.gitlab.com/ee/development/labels/index.html#priority-labels) – P1 bugs are top priority, P2 bugs are second priority.\n\n\"This really allows us to, again, look at the trends: Are we improving? Are we getting worse? Do we need to look a little bit deeper here and do a corrective action to help address any problems that we see within the trends that Insights provides?\" says Kyle.\n\n#### Configuration of SLO chart\n\nHere is a peek at what happens inside the YAML file to configure the bugs SLO chart.\n\n```\nbugsPastSLO:\n  title: Bugs Past SLO\n  charts:\n    - title: Open bugs past priority SLO by creation month\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - bug\n          - missed-SLO\n        collection_labels:\n          - P1\n          - P2\n        group_by: month\n        period_limit: 24\n    - title: Open customer bugs past priority SLO by creation month\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - bug\n          - missed-SLO\n          - customer\n        collection_labels:\n          - P1\n          - P2\n        group_by: month\n        period_limit: 24\n```\n{: .language-yaml}\n\n#### Triage helps ensure good label hygiene\n\nFor the Bugs SLO chart, we use the [GitLab triage project](https://gitlab.com/gitlab-org/gitlab-triage) to [automatically apply the `missed-SLO` label to open issues with priority labels that miss the SLO target](/handbook/engineering/quality/triage-operations/#missed-slo). We use automation here because the GitLab project is so massive that it would not be feasible to manually apply this label based upon the missed SLO target rules. Insights is flexible enough that either manual labeling or automation can be used on any dashboard.
\n\n### Support issue tracker\n\nThe Support team used one of our out-of-the-box dashboards to [see how many Support issues are open and closed per month](https://gitlab.com/gitlab-com/support-forum/insights/#/issues) with the [GitLab.com Support Tracker project](https://gitlab.com/gitlab-com/support-forum), which looks at support issues raised by GitLab.com users that don't go through the Support team.\n\n![Support issue tracker](https://about.gitlab.com/images/blogimages/support_issue_tracker.png){: .shadow.medium.center}\nThe Support team also uses one of our out-of-the-box dashboards that tracks the number of issues open and closed each month.\n{: .note.text-center}\n\n\"This shows that [the dashboard] is quite useful out of the box to just see some visualizations without doing any configuration,\" says Mark. \"These were the charts that we thought would give the most value to a team or to a project without doing any config whatsoever.\"\n\n## How our Quality Engineering team uses Insights\n\nThe Quality Engineering team uses Insights to look at opportunities to remedy gaps in a specific project in our Enterprise Edition (EE), as well as to visualize flaky tests on GitLab based on reported issues.\n\n### Enterprise Edition testcases chart\n\nOne of our more specific use cases is the Enterprise testcases chart. The Quality Engineering department is working to close the gap in testcases in GitLab Enterprise Edition. The team [configured a chart](https://gitlab.com/gitlab-org/quality/testcases/insights/#/eeTestcasesCharts) within the [testcases project](https://gitlab.com/gitlab-org/quality/testcases/tree/master) to help visualize how many open and closed test gaps there are, separated by GitLab product area and product tier.\n\n![EE testcases chart](https://about.gitlab.com/images/blogimages/EE_testcases.png){: .shadow.medium.center}\nQuality Engineering configured this chart to visualize gaps in testcases on GitLab Enterprise Edition.\n{: .note.text-center}\n\n\"Looking at this chart, we may say, ‘Maybe we should have a few people focus on the gaps in verify because it has the most open testcases at the current point',\" says Kyle.\n\n#### Configuration of EE testcases chart\n\nThe EE testcases chart is not something that is available out of the box, but the [configuration for the chart](https://gitlab.com/gitlab-org/quality/testcases/blob/master/.gitlab/insights.yml) is pretty simple nonetheless.\n\n```\neeTestcasesCharts:\n  title: 'Charts for EE Testcases'\n  charts:\n    - title: Open testcases (backlog) by stage\n      type: bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - \"Quality:EE test gaps\"\n        collection_labels:\n          - \"devops::configure\"\n          - \"devops::create\"\n          - \"devops::protect\"\n          - \"devops::enablement\"\n          - \"devops::growth\"\n          - \"devops::manage\"\n          - \"devops::monitor\"\n          - \"devops::package\"\n          - \"devops::plan\"\n          - \"devops::release\"\n          - \"devops::secure\"\n          - \"devops::verify\"\n```\n{: .language-yaml}\n\nThe configuration shows that this is a bar chart that is looking at open issues with the filter `Quality:EE test gaps`. The collection labels are what break the bars out into different columns. While it is possible to illustrate the data in very intricate ways, the underlying schema to configure the chart is actually quite simple, mirroring the process of searching the issue tracker by filtering based on labels.
\n\n![Issue tracker](https://about.gitlab.com/images/blogimages/issue_tracker_EE.png){: .shadow.medium.center}\nThe issues represented in the EE testcases chart can be searched for by label using the issue tracker in the testcases project.\n{: .note.text-center}\n\nOpening the issue tracker for the testcases project, you can search by the `Quality:EE test gaps` label and select open issues to see the actual issues represented by the Insights chart.\n\nThe key takeaway: If your team has good label hygiene and a logical workflow, building charts based on Insights should not be particularly challenging.\n\n### End-to-end transient failures\n\nThe Quality Engineering team monitors how often we have reports of flaky tests in our pipeline by looking at the number of issues created that fit the label schema.\n\n![End-to-end transient failure chart](https://about.gitlab.com/images/blogimages/end_to_end_chart.png){: .shadow.medium.center}\nA second chart configured for Quality Engineering is the end-to-end transient failure chart, which looks at flaky tests.\n{: .note.text-center}\n\nSimilar to many of our other charts, this is a stacked bar graph that looks at both open and closed issues on a weekly basis, and the underlying configuration is as you might expect.\n\n```\ntransientFailures:\n  title: End to end transient failures\n  charts:\n    - title: Opened transient failures per week\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: opened\n        filter_labels:\n          - \"Quality\"\n          - \"QA\"\n          - \"bug\"\n        collection_labels:\n          - \"found:gitlab.com\"\n          - \"found:canary.gitlab.com\"\n          - \"found:staging.gitlab.com\"\n          - \"found:staging-orchestrated\"\n          - \"found:dev.gitlab.com\"\n          - \"found:nightly\"\n          - \"found:in MR\"\n        group_by: week\n        period_limit: 24\n    - title: Closed transient failures per week\n      type: stacked-bar\n      query:\n        issuable_type: issue\n        issuable_state: closed\n        filter_labels:\n          - \"Quality\"\n          - \"QA\"\n          - \"bug\"\n        collection_labels:\n          - \"found:gitlab.com\"\n          - \"found:canary.gitlab.com\"\n          - \"found:staging.gitlab.com\"\n          - \"found:staging-orchestrated\"\n          - \"found:dev.gitlab.com\"\n          - \"found:nightly\"\n          - \"found:in MR\"\n        group_by: week\n        period_limit: 24\n```\n{: .language-yaml}\n\n## Implementing Insights for your team\n\nIf your team is often pulling data from GitLab through an API or CSV export, and then building charts based on issues and merge request data, then Insights will make your life a lot easier!\n\nSome questions to think about before implementing Insights include: How would you want to categorize the work being done and the issues that are being created? How do you want to monitor the open/close rates on your issues? Also, how do you plan on using labels?\n\nInsights users really need to define their workflows and have a clear idea about how they're using labels. We recommend having some sort of [automated mechanism to ensure good label hygiene](/handbook/engineering/quality/triage-operations/#triage-automation). [GitLab Triage](https://gitlab.com/gitlab-org/gitlab-triage) is our open source project that automates labeling of issues on our giant GitLab project and is a good candidate for any organization that has a large backlog of issues.
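\n\nAs an illustration, here is a minimal sketch of a gitlab-triage policy file; the rule, interval, and label are placeholders rather than the policies we actually run:\n\n```\nresource_rules:\n  issues:\n    rules:\n      - name: Flag old issues for triage\n        conditions:\n          date:\n            attribute: created_at\n            condition: older_than\n            interval_type: months\n            interval: 6\n        actions:\n          labels:\n            # Hypothetical label; use whatever fits your workflow\n            - needs triage\n```\n{: .language-yaml}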
\n\nWe recommend users [read up on the issues workflow](https://docs.gitlab.com/ee/development/contributing/issue_workflow.html) to learn more about how to use labels and the issue tracker, which is valuable background knowledge to improve your use of Insights.\n\nWe've been dogfooding Insights for some time to help iron out any wrinkles in the implementation or application of this technology, but we also want to hear your ideas of how we can make improvements to Insights. [Create an issue in the GitLab project issue tracker](https://gitlab.com/gitlab-org/gitlab/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=insights) with the Insights label to share your feedback with us!\n\nCover photo by [Aaron Burden](https://unsplash.com/@aaronburden) on [Unsplash](https://unsplash.com/photos/Qy-CBKUg_X8).\n{: .note.text-center}\n",[749,894,915],{"slug":5607,"featured":6,"template":678},"insights","content:en-us:blog:insights.yml","Insights","en-us/blog/insights.yml","en-us/blog/insights",{"_path":5613,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5614,"content":5620,"config":5625,"_id":5627,"_type":16,"title":5628,"_source":17,"_file":5629,"_stem":5630,"_extension":20},"/en-us/blog/iteration-on-error-tracking",{"title":5615,"description":5616,"ogTitle":5615,"ogDescription":5616,"noIndex":6,"ogImage":5617,"ogUrl":5618,"ogSiteName":692,"ogType":693,"canonicalUrls":5618,"schema":5619},"Why we scoped down to build up error tracking ","We dig into how shipping small iterations is accelerating delivery on our error tracking product.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665440/Blog/Hero%20Images/automate-ce-ee-merges.jpg","https://about.gitlab.com/blog/iteration-on-error-tracking","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we scoped down to build up error tracking \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-01-23\",\n      }",{"title":5615,"description":5616,"authors":5621,"heroImage":5617,"date":5622,"body":5623,"category":14,"tags":5624},[3676],"2020-01-23","When our vision for [error tracking](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/) is fully realized, the developers who use GitLab will be able to find and fix errors before their customers ever report them, all while staying in our tool. But waiting until our error tracking feature is pristine would just slow us down.\n\nInstead, the engineers and product managers on the [Monitor:Health](https://handbook.gitlab.com/handbook/engineering/development/ops/monitor/respond/) team work **iteratively** by shipping smaller changes as we move closer to achieving our vision for the error tracking feature.\n\n## What does it mean to work iteratively?\n\n\"[Iterating] means scoping down a task to deliver it sooner. So, it means making something smaller so you can get it done quicker,\" says [Sid Sijbrandij](/company/team/#sytses), CEO and co-founder of GitLab.\n\nWe made [iteration](https://handbook.gitlab.com/handbook/values/#iteration) one of our core company values because of the fundamental belief that even a small change is better than no change at all. And while iteration in engineering is already recognized as being effective, our organization aims to make iteration a component of every team’s workflow.
\n\nIn the video below, Sid and [Christopher \"Leif\" Lefelhocz](https://about.gitlab.com/company/team/#christopher-l), senior director of development, share how the product and engineering teams worked together to speed up development on error tracking by breaking the engineering process down into small steps and iterating as they go.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tPTweQlBS54\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe followed up with the Monitor:Health team to talk about how product and engineering worked together to develop an iterative strategy for making improvements to our error tracking product, both in terms of how our product team built the plan for error tracking and how engineering shipped the [minimum viable change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) (MVC) to production.\n\n## How we created a product strategy for error tracking\n\nError tracking is a process whereby application errors are identified and fixed as quickly as possible. The way error tracking functions at GitLab today is [through integration with Sentry](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/), which aggregates errors, surfaces them in the GitLab UI, and provides the tools to triage and respond to the critical ones.\n\nToday, our error tracking feature is at the [minimal level of maturity](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/), meaning we still have plenty of work to do before this feature is viable.\n\n\"The goal was to be able to provide error tracking as a product and bring these processes closer to the development delivery workflow,\" said [Sarah Waldner](/company/team/#sarahwaldner), senior product manager on the Monitor:Health team.\n\nThe product team summarized what needs to be done to move [error tracking at GitLab from minimal to viable](https://gitlab.com/groups/gitlab-org/-/epics/1625) as part of a detailed [parent epic](https://docs.gitlab.com/ee/user/group/epics/#multi-level-child-epics). The parent epic essentially establishes product priorities by defining which use cases error tracking needs to solve in order for the product to be considered a viable feature. The next step was to define the core problems that users encounter with error tracking and validate the solutions that should be used to solve those problems.\n\n\"Once we came up with these problems and validated those, we moved into a solution validation cycle whereby designers came up with different solutions and flows for these and then we tested them with different users,\" says Sarah. \"After we did all of that and had all of our solutions validated, we broke it down into four different things that someone needs to do from a high level with Sentry.\"
\"After we did all of that and have all of our solutions validated we broke it down into four different things that someone needs to do from a high level with Sentry.\"\n\nThose top four actions were divided into child epics which roll-up to the parent epic, and include:\n\n*   [The instrumentation or configuration of Sentry](https://gitlab.com/groups/gitlab-org/-/epics/2036)\n*   [Correlating errors](https://gitlab.com/groups/gitlab-org/-/epics/2035)\n*   [Resolving errors](https://gitlab.com/groups/gitlab-org/-/epics/2034)\n*   [Triaging errors](https://gitlab.com/groups/gitlab-org/-/epics/2029)\n\nBy breaking down the problems and establishing solutions, the team took an important step toward establishing their product development priorities. Contained in each of these child epics are other epics and issues which break down the solutions into the larger aspects.\n\n## Establishing development priorities\n\nThe team recognized that, in order to boost error tracking to viable, there needed to be a better way to resolve errors that are surfaced by Sentry within GitLab. The team created an epic for [resolving errors](https://gitlab.com/groups/gitlab-org/-/epics/2034), and outlined some of the key development priorities.\n\n\"So, to resolve errors, if you have an error that you need to fix, you might want to create an issue to track that work, respond to it, and close that issue in the general workflow,\" says Sarah. \"So within the resolving errors workflow part of the error tracking parent epic, we pose the idea of being able to manually open an issue from a Sentry error, which was then broken down further into where you do it from, and further again on the error detail page.\"\n\n![Resolve errors epic](https://about.gitlab.com/images/blogimages/resolve_errors_epic.png){: .shadow.medium.center}\nThe workflow for the resolve errors epic is broken down into multiple child epics, which correlate to different development projects.\n{: .note.text-center}\n\nThe team decided that we needed the ability to [create an issue within GitLab based on the errors detected by Sentry](https://gitlab.com/groups/gitlab-org/-/epics/2210) and that they wanted this function and button to appear on both the error list page as well as on the [error detail page](https://gitlab.com/groups/gitlab-org/-/epics/2210). The team then decided to make the error detail page the first priority.\n\n\"Through conversation, we were able to determine what is the bare minimum of value and broke it down as best as we could from frontend to backend, with the idea that it's better to ship something small that's not fully complete than (to ship) nothing at all,\" says [Clement Ho](/company/team/#ClemMakesApps), frontend engineering manager on Monitor:Health.\n\n## The \"Create an Issue\" button in three iterations\n\n\"Being able to open an issue from the error detail page seems really simple, but once you talk through what that workflow actually looks like, there are a lot more aspects to it than previously thought,\" says Sarah.\n\n![Open issue workflow](https://about.gitlab.com/images/blogimages/open_issue_epic.png){: .shadow.medium.center}\nBreaking the frontend and backend engineering into iterations shows just how much work needs to be done to ship even one minor component of the error tracking product.\n{: .note.text-center}\n\n### The \"Create an Issue\" button in stages\n\nClement was the architect behind the `Create an Issue` button frontend iterations. 
He explained that he wanted to take advantage of GitLab deploying frequently, and so he broke down the development process for the `Create an Issue` button into a series of small steps.\n\nThe [first iteration](https://gitlab.com/gitlab-org/gitlab/issues/36537) was simply to build the ability to create an issue from the error detail page. In this iteration, the `Create an Issue` button was simple and unstyled, and clicking it led the user to a blank issue. While not overly helpful at this phase, it represents a good start in allowing someone to respond to an error.\n\n![Create an Issue button](https://about.gitlab.com/images/blogimages/create_an_issue_it1.png){: .shadow.medium.center}\nWhat the `Create an Issue` button will look like when it's done.\n{: .note.text-center}\n\nIn the [second iteration](https://gitlab.com/gitlab-org/gitlab/issues/36540), the user clicks `Create an Issue` and the issue comes pre-filled with the Sentry error title, description, and link. It’s still not styled consistently with the GitLab UI yet, but it’s possible to see more of the error context when creating an issue in response to the error.\n\nIn the [third iteration](https://gitlab.com/gitlab-org/gitlab/issues/36542), the GitLab UI gets cleaned up and the issue comes with proper formatting.\n\n\"Now, we are three issues into this and each one has been done in a couple of days and after the first couple of days, someone was able to create an issue,\" says Sarah. \"And that way we got the system much faster instead of first adding the button and then adding the experience of the new issue and then having all of the information in there styled.\"\n\n### Is it better to start with frontend or backend engineering?\n\nAs Christopher noted in his [conversation with Sid](https://www.youtube.com/watch?v=tPTweQlBS54), everything that Clement was working on in the first three iterations was frontend-focused; typically engineers start problem-solving from the backend.\n\n\"I love frontend first. I love interface first also because it helps everyone think about it,\" says [Sid to Christopher regarding this project](https://www.youtube.com/watch?v=tPTweQlBS54). \"If you have something in the interface it's easier to understand for customers, for backend people, etc. So in the end what the customer sees is the product. One way to develop is to start with the readme or start with the press release. After that, the closest thing you can think of is the interface. So I think it's much better to have an interface built and then do the backend than vice versa. Even though I come from backend engineering.\"\n\nJust a few days after Clement started building the frontend of the `Create an Issue` button, the backend team started building support in separate issues. The main priority was to build backend support that associates issues with errors so that users are not creating multiple issues for the same error. The engineers also built frontend support so the user can see that an issue was already created and linked to a particular error.\n\n## The power of iterative thinking\n\n\"One huge thing that came out of this is all team members now feel empowered to create issues and to just add them to the milestone and if they realize something is too big, they can create followups or second iterations,\" says Sarah.\n\nWhile the end goal is to build a viable error tracking product, the big vision simply cannot be achieved without smaller, incremental steps. 
While it is clear that the engineering teams embraced iteration, Sarah and the product team also recognized the strong strategic value of iterative product development.\n\nAt the same time, Clement wanted to take advantage of GitLab’s frequent deployments, but he also realized that by breaking down the engineering process into MVCs he could drive up [merge request rate](https://handbook.gitlab.com/handbook/engineering/development/performance-indicators/#mr-rate) on the Monitor:Health frontend engineering team (the average number of merge requests per engineer merged per month), which is a [KPI](https://handbook.gitlab.com/handbook/engineering/development/performance-indicators/#mr-rate).\n\n![MR rate increases](https://about.gitlab.com/images/blogimages/mrs.png){: .shadow.medium.center}\nThe data shows an increase in the rate of merge requests on the Monitor:Health frontend engineering team.\n{: .note.text-center}\n\nThe data speaks for itself: since breaking the product development process for error tracking down into smaller iterations, the MR rate for Clement’s team has increased. 🎉\n\n## Scoping down to speed things up\n\nClement says that one of his key takeaways from this iterative development process was that GitLab ought to embrace iteration not only on the engineering side but also in product development. He is encouraging his team to ship MVCs more frequently, and plans to check his work by running through the process a few more times to iron out any wrinkles in the workflow.\n\nWhile the highly iterative approach to error tracking has been lauded by everyone from the senior director of development to our very own CEO, Clement acknowledges that this is still a work in progress.\n\n\"I think the cost is communication and information being spread out everywhere,\" Clement says.\n\nHe advises teams looking to adopt this highly iterative approach to be extra disciplined about consolidating conversation on specific epics and issues within GitLab; otherwise, communication can get unwieldy, fast.\n\nCover photo by Max Ostrozhinskiy on Unsplash.\n{: .note}\n",[894,1979,1286,727],{"slug":5626,"featured":6,"template":678},"iteration-on-error-tracking","content:en-us:blog:iteration-on-error-tracking.yml","Iteration On Error Tracking","en-us/blog/iteration-on-error-tracking.yml","en-us/blog/iteration-on-error-tracking",{"_path":5632,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5633,"content":5639,"config":5644,"_id":5646,"_type":16,"title":5647,"_source":17,"_file":5648,"_stem":5649,"_extension":20},"/en-us/blog/windows-shared-runner-beta",{"title":5634,"description":5635,"ogTitle":5634,"ogDescription":5635,"noIndex":6,"ogImage":5636,"ogUrl":5637,"ogSiteName":692,"ogType":693,"canonicalUrls":5637,"schema":5638},"Windows Shared Runners beta now available on GitLab.com","Scalable Windows VMs for running Windows build jobs on GitLab.com.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681027/Blog/Hero%20Images/windows-shared-beta.jpg","https://about.gitlab.com/blog/windows-shared-runner-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Windows Shared Runners beta now available on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2020-01-21\",\n      }",{"title":5634,"description":5635,"authors":5640,"heroImage":5636,"date":5641,"body":5642,"category":14,"tags":5643},[1544],"2020-01-21","\n\n\nGitLab has had 
support for Windows CI/CD Runners for quite a long time, but if you were doing Windows development, you needed to [install and manage these Runners](https://docs.gitlab.com/runner/install/windows.html) yourself. This works great for customers who prefer to manage their own Runners, but for customers who prefer to use GitLab.com shared Runners managed by the GitLab team, the choice has been limited to Linux.\n\nToday, we are happy to announce that Windows Shared Runners hosted by GitLab are available in beta. As we are starting to roll out this important service to our community, we invite you to help shape the direction of CI/CD tooling for the Windows ecosystem on GitLab.com.\n\n## What's new?\n\nNow, you can take advantage of a fully managed, auto-scaling, and secure environment for running your build jobs on Windows virtual machines (VMs). These GitLab-hosted Windows Shared Runners are pre-configured with various software packages such as the Chocolatey package manager for Windows, Visual Studio 2019 Build Tools, and the Microsoft .NET Framework. So you have a base set of tooling to start building your Windows applications without needing to set up and install your own self-hosted Windows Runners. You can find a full list of available Windows packages in the package [documentation](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers/blob/main/cookbooks/preinstalled-software/README.md).\n\nWith the Windows Shared Runners on GitLab.com, each job runs in a new virtual machine instance that gets deleted after the job is complete, ensuring that your code is 100% isolated and secure. We also take care of maintenance and upgrades to the pre-configured software packages, so you don't have to. Just like with GitLab.com Linux Runners, there’s no requirement to use Shared Runners. If your build tooling configuration or security requirements demand it, you can, as always, [install and self-host Windows Runners](https://docs.gitlab.com/runner/install/windows.html) on your infrastructure.\n\n## Technology Overview\n\nThe following details a few key specifications for the Windows Shared Runners:\n\n- The Windows Shared Runners use the GitLab [custom executor](https://docs.gitlab.com/runner/executors/custom.html) that we introduced in 12.1.\n- A new Windows Shared Runners virtual machine is created for each pipeline job and deleted after the job is completed.\n\n## Pricing\n\nTo begin with, Windows Shared Runner pricing will be the same as for Linux Runners. Usage for Windows Runners will be deducted from your Runner minute pool [depending on your plan](/pricing/#gitlab-com). You can optionally [purchase additional runner minutes](https://docs.gitlab.com/ee/subscriptions/gitlab_com/#purchase-additional-ci-minutes) that will be used for both Linux and Windows shared runners.\n\nIn the future, Windows Shared Runners will likely use separate pricing that is higher than for Linux minutes. 
Any future [pricing](https://gitlab.com/gitlab-org/gitlab/issues/30834) changes will be announced on the GitLab blog.\n\n\n## Getting started\n\nTo get started, create a `.gitlab-ci.yml` file in your GitLab-hosted project's root directory and add the following tags: `shared-windows`, `windows`, and `windows-1809`, as shown in the example configuration file.\n\n```\n.shared_windows_runners:\n  tags:\n  - shared-windows\n  - windows\n  - windows-1809\n\nstages:\n  - build\n  - test\n\nbefore_script:\n - Set-Variable -Name \"time\" -Value (date -Format \"%H:%m\")\n - echo ${time}\n - echo \"started by ${GITLAB_USER_NAME}\"\n\nbuild:\n  extends:\n  - .shared_windows_runners\n  stage: build\n  script:\n  - echo \"running scripts in the build job\"\n\ntest:\n  extends:\n  - .shared_windows_runners\n  stage: test\n  script:\n  - echo \"running scripts in the test job\"\n```\n\nIncluding the `.gitlab-ci.yml` file in the project repository means that any new commits will trigger the execution of your [GitLab CI/CD pipeline](/topics/ci-cd/). In this file, you have the option of specifying tags so that a job will only run on GitLab Runners that match the tag specified. For more information on the use of tags, refer to the [tags](https://docs.gitlab.com/ee/ci/yaml/#tags) section of the GitLab CI/CD Pipeline Configuration Reference documentation. The [Shared Runners](https://docs.gitlab.com/ee/user/gitlab_com/#shared-runners) section of the GitLab.com settings documentation page covers more configuration information for the Windows Shared Runners.\n\n\n## Notable limitations and known issues\n\nThe hosting of Windows Shared Runners is a new service on GitLab.com. This section covers the limitations and known issues that users of the beta should take into consideration when using this service.\n\n- The average provisioning time for a new Windows VM is five minutes. This means that for the beta, you will notice slower build start times on the Windows Shared Runners fleet compared to Linux. In a future release, we will add capabilities to the autoscaler to enable the pre-warming of the virtual machine instances. This will significantly reduce the time it takes to provision a VM on the Windows fleet. Additional details and plans are covered in this [issue](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/autoscaler/issues/32).\n- Pending queue times will be longer than the queue times on the Linux Shared Runner fleet.\n- Since Windows Shared Runners are currently in beta, the performance, uptime, and capabilities will be limited, so they are not recommended for production use.\n- The Windows Shared Runners virtual machine instances do not use the GitLab Docker executor. This means that unlike the Linux Shared Runners, you will not be able to specify `image` and `services` in your pipeline configuration.\n- For the beta release, we have included a set of software packages in the base VM image. If your CI job requires additional software that's not included in this [list](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers/tree/master/cookbooks/preinstalled-software), then you will need to add installation commands to `before_script` or `script` to install the required software; a minimal sketch follows this list. Note: Each job runs on a new VM instance, so the installation of additional software packages needs to be repeated for each job in your pipeline.\n- We may introduce breaking changes that require updates to pipelines that use the Windows Shared Runner fleet.
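\n\nFor illustration, here is a minimal sketch of a job that installs an extra package in `before_script`, building on the `.shared_windows_runners` template from the example above. The `cmake` Chocolatey package is only an assumed placeholder – substitute whatever your build actually requires (Chocolatey itself is pre-installed on the Windows Shared Runner image):\n\n```\n# Hypothetical job: install additional software with the pre-installed\n# Chocolatey package manager before the build steps run.\nwindows_install_example:\n  extends:\n  - .shared_windows_runners\n  stage: build\n  before_script:\n  # -y accepts all prompts so the install runs unattended\n  - choco install -y cmake\n  script:\n  # Tools installed this way may not be on PATH in the current session;\n  # if so, invoke them by their full install path.\n  - echo \"additional software installed\"\n```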
\n\n## Next steps\n\nWe [plan](https://gitlab.com/groups/gitlab-org/-/epics/2162) to continue to iterate quickly and improve the build environment, Runner, and tooling during the beta period. We invite you to complete this short [form](https://forms.gle/9qaB2kQcBX93PVax5) because your feedback is critical to helping us prioritize work on the most valuable improvements to the Windows Shared Runners solution.\n\nTo report a bug or request a feature or enhancement, follow these steps:\n- Open an issue in the [GitLab Runner project](https://gitlab.com/gitlab-org/gitlab-runner/issues).\n- Describe the bug or feature enhancement and, if possible, include links to examples from your repository.\n- Add these labels to the issue: `Shared Runners::Windows`, `group::runner`.\n- Tag [@DarrenEastman](https://gitlab.com/DarrenEastman) on the issue.\n\nCover photo by William Daigneault on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[110],{"slug":5645,"featured":6,"template":678},"windows-shared-runner-beta","content:en-us:blog:windows-shared-runner-beta.yml","Windows Shared Runner Beta","en-us/blog/windows-shared-runner-beta.yml","en-us/blog/windows-shared-runner-beta",{"_path":5651,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5652,"content":5658,"config":5663,"_id":5665,"_type":16,"title":5666,"_source":17,"_file":5667,"_stem":5668,"_extension":20},"/en-us/blog/gitlab-changes-to-cloudflare",{"title":5653,"description":5654,"ogTitle":5653,"ogDescription":5654,"noIndex":6,"ogImage":5655,"ogUrl":5656,"ogSiteName":692,"ogType":693,"canonicalUrls":5656,"schema":5657},"Why GitLab.com is changing its CDN provider to Cloudflare March 28","Get the scoop on our plan to change GitLab.com to Cloudflare.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665811/Blog/Hero%20Images/daytime-clouds.jpg","https://about.gitlab.com/blog/gitlab-changes-to-cloudflare","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab.com is changing its CDN provider to Cloudflare March 28\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David Smith\"}],\n        \"datePublished\": \"2020-01-16\",\n      }",{"title":5653,"description":5654,"authors":5659,"heroImage":5655,"date":5660,"body":5661,"category":14,"tags":5662},[2463],"2020-01-16","\n\n## Upcoming changes to our CDN for GitLab.com\n\nAs GitLab.com has grown, so have our needs around the security and scalability of the web application. We are in the process of changing our CDN provider to [Cloudflare](https://www.cloudflare.com/) as part of our improvements to GitLab.com. We are approaching this change with care, and this post is to let everyone know about the shift ahead of time.\n\n## Update on timing\n\nWe have picked the weekend of March 28, 2020 to do the switch to Cloudflare.  
Recent incident work on GitLab.com has led us to push the date back from March 21, the date we published last week.\n\n### Why are we working on this?\n\nWe are currently using [Fastly](https://www.fastly.com) for serving static content, but we want to improve GitLab.com availability, security, and performance with other tools like a Web Application Firewall (WAF), [Spectrum](https://www.cloudflare.com/products/cloudflare-spectrum/), and [Argo](https://www.cloudflare.com/products/argo-smart-routing/). We also want to preserve the current workflow: interacting with GitLab.com for both `git` and web application interactions. Since GitLab.com serves more than just HTTPS traffic, the change is a little more complicated. The traffic pattern requires a solution that can handle traffic on both port 22 and port 443. As a result of the complexity and requirements, we realized we would like to have a solution for CDN, WAF, and DDoS protection with one vendor.\n\nDuring the summer of 2019, we did evaluations and chose Cloudflare as the vendor that could best meet our requirements. Now that we are closer to switching over, we have created a [readiness review](https://gitlab.com/gitlab-com/gl-infra/readiness/tree/master/cloudflare) to talk about our plans for the changeover.\n\n### What you need to know\n\nFirst, this change will not affect self-managed users of GitLab; it only applies to users of GitLab.com. At a very high level, most users of GitLab.com will not need to take any action.\n\nGitLab.com users with a whitelist of sites in their firewall setup will need to change what is whitelisted for GitLab.com. For the initial change, we will be switching DNS to Cloudflare. This will cause all GitLab.com traffic to be proxied through Cloudflare. This change will be visible as changes in the DNS records queried for GitLab.com.\nA whitelist of IPs can be found [here](https://www.cloudflare.com/ips/).\nWe wanted to make sure this is communicated ahead of time, as this is an important detail that may be in use by some firewalling setups.\n\nSSH-based `git` actions via `altssh.gitlab.com` on port 443 continue to be supported. As with GitLab.com, any firewalls you set up might need to be reconfigured for the new IP ranges.\n\nCustom runner images or private runners could also be affected if they have any kind of caching of DNS or SSL certificates.\n\n### How can I stay up to date on when the change will happen?\n\nWe will update this blog post, [GitLab status](https://status.gitlab.com), and [@gitlabstatus on Twitter](https://www.twitter.com/gitlabstatus) with the planned date of this initial change – likely sometime in early February 2020. When it is time for the change on GitLab.com, we will also update [GitLab.com ranges](https://docs.gitlab.com/ee/user/gitlab_com/#ip-range) with the range from [Cloudflare](https://www.cloudflare.com/ips/).\n\nOnce we know traffic is flowing through Cloudflare successfully, we will start exploring more features like the WAF in logging-only mode. We will also test [Argo](https://www.cloudflare.com/products/argo-smart-routing/), and we again hope to make traffic to GitLab.com faster.\n\nFeel free to ask our support team your questions, and they will be able to talk to our infrastructure team for the details. Thanks for your continued support and check here for more updates soon!\n\n### Links to our plans and other information\n\n1. [GitLab status: Subscribe by email, Twitter, webhook, Slack](https://status.gitlab.com)\n2. 
[More discussion about this blog post](https://gitlab.com/gitlab-com/www-gitlab-com/issues/5907)\n3. [Production readiness review MR](https://gitlab.com/gitlab-com/gl-infra/readiness/tree/master/cloudflare)\n4. [Top-level epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/94)\n5. [Cloudflare privacy policy](https://www.cloudflare.com/privacypolicy/)\n6. [Cloudflare IP ranges](https://www.cloudflare.com/ips/)\n7. [Cloudflare Prometheus Exporter](https://gitlab.com/gitlab-org/cloudflare_exporter)\n\n\n### Definitions\n- Web Application Firewall (WAF): A type of firewall that helps protect web applications from a specific set of attacks\n- Argo: A Cloudflare product that helps route web traffic across the fastest and most reliable network paths\n- Spectrum: A Cloudflare product that helps secure the types of ports that GitLab.com uses for SSH access\n\nCover image by [Sam Schooler](https://unsplash.com/photos/E9aetBe2w40) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[704,1286,1307],{"slug":5664,"featured":6,"template":678},"gitlab-changes-to-cloudflare","content:en-us:blog:gitlab-changes-to-cloudflare.yml","Gitlab Changes To Cloudflare","en-us/blog/gitlab-changes-to-cloudflare.yml","en-us/blog/gitlab-changes-to-cloudflare",{"_path":5670,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5671,"content":5676,"config":5682,"_id":5684,"_type":16,"title":5685,"_source":17,"_file":5686,"_stem":5687,"_extension":20},"/en-us/blog/future-merge-requests-realtime-collab",{"title":5672,"description":5673,"ogTitle":5672,"ogDescription":5673,"noIndex":6,"ogImage":2284,"ogUrl":5674,"ogSiteName":692,"ogType":693,"canonicalUrls":5674,"schema":5675},"The future of merge requests: Real-time collaboration","We want to hear your thoughts on the future of merge requests and code review.","https://about.gitlab.com/blog/future-merge-requests-realtime-collab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The future of merge requests: Real-time collaboration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pedro Moreira da Silva\"}],\n        \"datePublished\": \"2019-12-19\",\n      }",{"title":5672,"description":5673,"authors":5677,"heroImage":2284,"date":5679,"body":5680,"category":14,"tags":5681},[5678],"Pedro Moreira da Silva","2019-12-19","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2019-12-20.\n{: .alert .alert-info .note}\n\nWe want to share some of the work we’ve been doing in the [Source Code](/handbook/product/categories/#source-code-group) part of the product and get feedback on what could be the future of [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/) and [code review](/direction/create/code_review/).\n\n**Perhaps the best way is to walk you through a short visual story. You can** [**watch the recording (28 min)**](https://www.youtube.com/watch?v=KpdvIU6hv94) **or** [**jump to its text version**](#context) **below. In the end, if you’d like to share your thoughts, you can do so on the** [**feedback issue**][feedback-issue].\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/KpdvIU6hv94\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Context\n\nIf you haven’t been following along with our work on merge requests, let’s set the scene:\n1. 
In the next releases of GitLab, we’re shipping [**performance**](https://gitlab.com/groups/gitlab-org/-/epics/1417) **and** [**navigation**](https://gitlab.com/gitlab-org/gitlab/issues/33813) **improvements**, based on the user experience research we’ve been doing.\n1. We also have an exciting [**new Sourcegraph integration**](/blog/sourcegraph-code-intelligence-integration-for-gitlab/) that levels up the merge request interface, allowing you to navigate code, jump to definitions, and find references.\n1. **Other improvements** we’ve shipped over the past year include [multiple assignees](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html#multiple-assignees), [multiple approval rules](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#multiple-approval-rules), [code owners](https://docs.gitlab.com/ee/user/project/codeowners/), and [suggest changes][suggest-changes] – all of these were based on research and user feedback (your feedback).\n\nThese are nice improvements but they are iterating on what we already have and are not substantially changing the code review workflow. Today’s merge request experience is pretty similar to what it was five years ago – we’ve improved it a lot but its fundamental structure is the same.\n\nWe’ve been hearing feedback from users and customers about how we might improve merge requests to solve harder problems, and that may require rethinking what we’ve come to accept as the default. **The merge request is a significant place, where people collaborate, where knowledge is shared, where people grow in their skills, where code quality is ensured and improved.** As a result, significant time is spent in the merge request interface. So, we’ve been asking ourselves:\n\n> _How can we significantly decrease the cycle time, increase the efficiency of code review, and create better ways of collaborating?_\n\nTo simplify our approach, we have defined **three key questions** that have also surfaced through research and our own usage:\n1. **Catch up**: Understanding where the merge request is since I last looked at it, what needs my attention, what’s changed, and helping me review that difference more efficiently.\n1. **Real time collaboration**: Many teams are in the same location or same time zone and have significant overlap. Even here at GitLab, an [all-remote company](/company/culture/all-remote/), most people who work together share a few hours of overlap and are sometimes looking at the same things. So if people are coming to the same merge request, and working on it at the same time, how can we make that more efficient?\n1. **External discussions**: Working remotely, through [asynchronous communication](/handbook/communication/#introduction), is how we mainly communicate at GitLab. It’s something that we do quite well, but sometimes it’s better to work synchronously by just getting on a call to clear up confusion, communicate, sketch things out, and then document those decisions. But unfortunately, the merge request doesn’t provide a natural way to do that. We use Zoom, we use Google Docs, and then we try to summarize in merge requests or issues. What if we could integrate all of that into the merge request?\n\nTo explain what we’ve been thinking about and how we could answer these questions, here is a **short visual story**. The following images are very low-fidelity mockups so that we can focus on ideas rather than dwelling on the details.\n\n## 1. 
Catch up\n\n> The story starts with me, as a reviewer, coming into a merge request and looking at my personal area at the top right corner.\n\n![](https://about.gitlab.com/images/blogimages/future-merge-requests-realtime-collab/step1.png){: .shadow.medium.center}\n\n> I can immediately see what has happened since I last reviewed the merge request: Two files with new changes to review and one comment that needs my attention. This comment may be from someone who mentioned me, someone who replied to one of my comments, or someone who resolved a thread I’m participating in.\n\n## 2. Smarter suggestions\n\n> I click the speech bubble icon to jump to that comment, where Katherine, the merge request author, and James, another reviewer, are participating. Hmm, this discussion is getting a bit convoluted... but since they are in other time zones, I’ll reply with my thoughts so they can read them in their own time. It also looks like we need specific expertise here. When I start @-mentioning, I see that André was the person who last changed this line of code (and he’s also a code owner of this file). He might have an idea why this was changed, so I’ll mention him in my comment.\n\n![](https://about.gitlab.com/images/blogimages/future-merge-requests-realtime-collab/step2.png){: .shadow.medium.center}\n\nToday in GitLab we already suggest people who are involved in the merge request or conversation, but this idea brings in new data and more clearly highlights why people are relevant.\n\n## 3. Real time collaboration\n\n> Meanwhile, I notice that Katherine and James are **looking at this merge request right now**: their avatars pop up at the top of the merge request – _very similar to how you would see someone in Google Docs looking at the same document that you are_. I also get a notification saying that Katherine mentioned me in a comment and that James is now replying to one of my comments.\n\n![](https://about.gitlab.com/images/blogimages/future-merge-requests-realtime-collab/step3.png){: .shadow.medium.center}\n\nI can see people interacting with the merge request in real time, I can notice their presence, but I’m not seeing what they’re typing or collaborating directly with them – we’re just going about doing our tasks separately. **The way we work may change if we expect an immediate response.** For example, if you notice someone is online, you might ask more open-ended questions because you know that the other person can respond more quickly versus writing a longer response that seeks to conclude the discussion faster. And this is helpful not only for people who work distributed and asynchronously but also for people who work in the same time zone, in the same location, in the same office, or even in the same room.\n\n**These presence indicators create certain expectations and give a sense of progress to the merge requests as well.** When you’re waiting on a code review, you don’t know if someone has looked at it or if the reviewers are close to finishing the review. This can save people from interrupting one another. This provides a sense of progress without needing someone to take an active measure to find out if progress is being made. This is important because progress is one of those things that we all want to feel when we’re waiting on someone or something else.\n\n> Back to the story... This is great timing! 
Since we’re all online, maybe it’s time we jump on a call to clear up the discussions together and get back on track.\n\n![](https://about.gitlab.com/images/blogimages/future-merge-requests-realtime-collab/step4.png){: .shadow.medium.center}\n\n> By using this dropdown button next to the avatars, I can immediately start a shared session or invite the other participants to a video call, using Zoom (one of our favorite tools here at GitLab). Starting a Zoom meeting also starts a collaborative shared session in the merge request. **In the shared session we can follow each other and write comments together in real time.** I can now follow James and Katherine and see which files they are looking at, in which lines they’re commenting, and also what they’re writing, in real time. I can also ask to be followed if I want to focus all of the participants’ attention on a specific comment or file. I can even copy a link to this shared session and share it via our chat tool or email so that others can join without having to find the merge request.\n\n![](https://about.gitlab.com/images/blogimages/future-merge-requests-realtime-collab/step5.png){: .shadow.medium.center}\n\nBut the most important thing is the ability to **co-author and collaborate in real time on comments in the merge request**. Not only commenting on the changes but also in the merge request overview (comments that are not attached to specific lines). You can collaborate while you’re in a video call, on the phone, or just in the office next to each other.\n\n## 4. External discussions\n\n![](https://about.gitlab.com/images/blogimages/future-merge-requests-realtime-collab/step6.png){: .shadow.medium.center}\n\n> Collaborating in real time on the merge request allowed us to quickly reach a consensus. **And we could easily record everything we discussed in GitLab, instead of using separate tools.** Before ending the shared session, we add a joint comment with a summary and next steps.\n\nWe know that some teams do code reviews in meeting rooms, projecting the changes and all reviewing them together. Everyone’s got their laptops but they’re sort of looking at the same thing, but not quite. These abilities would allow everyone to take notes collaboratively, creating an interesting way of documenting. This is just one use case; there are likely many more that we can solve here.\n\nYou'll notice that it looks a lot like a Google Doc, but on a merge request in GitLab. From a user experience standpoint, we must try to use metaphors and patterns that people have seen and used in other common tools. **Looking and behaving like Google Docs is a good thing because people can immediately relate and understand what these cursors and avatars mean.** But it’s about understanding these paradigms, not accepting them blindly. We strive to study and see if they fit into the situation at hand and if the users recall using this metaphor for this purpose. Using [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions) is part of our values at GitLab.\n\nToday we use Google Docs to take notes during our meetings but maybe this can replace the need to have all of these different tools open simultaneously while we’re on a video call. 
You can use your preferred tools, like a projector, to collaborate better depending on your work setting and what works best for you and your team, but in the end **everything is recorded in the same tool, in the same integrated environment that is GitLab.** There is no data duplication and it stops teams from using tools like Google Docs to record information that then gets replicated in merge requests and issues; instead, the information can go directly into the single source of truth where the decision and discussion are relevant. If we can work out how to do this properly on merge requests, we can perhaps apply it to issues and epics as well.\n\n## 5. Record decisions\n\n> Finally, in the shared session we realized that one of the changes must be documented for posterity so that we can prevent mistakes and clarify our decision. So I come back to the comment to commit it to the code base. **This adds the comment directly to the source file as a code comment.**\n\n![](https://about.gitlab.com/images/blogimages/future-merge-requests-realtime-collab/step7.png){: .shadow.medium.center}\n\nOne thing that we see happening a lot at GitLab and in other companies is people explaining, in writing, a decision or why we shouldn’t touch a specific part of the code. This valuable content is usually left in comments, emails, or chat messages, when it should be a _code comment_. **Code comments allow explanations, decisions, and rationale to be left for posterity so that future authors can be aware of them.**\n\nOur [suggest changes feature][suggest-changes] in merge requests allows you to include suggestions as part of your line comments. These suggestions can then be applied directly to the file where the comment was made, replacing its contents with the suggestion. With the idea of \"Commit as file comment,\" this is slightly different, as you’d be applying the comment to the file itself, as a code comment.\n\nMoving the explanation into the actual file rather than leaving it as a comment in the merge request **makes it accessible wherever the code goes**. It’s in the Git repository, it has a timestamp, it’s part of the repository history, and it’s even in your desktop IDE. It means that if you refactor your repository or split a module out into another repository, the comment follows that line of code.\n\n## Conclusion\n\n**We’re not entirely sure how these ideas will be executed, if all of them are viable, or if they’re even good ideas.** That is why we’d love to hear your thoughts on the [**feedback issue**][feedback-issue]. Share with us what you like, what you don’t like, and what other ideas you have for code review. This short story doesn’t mean that we’re going through with these ideas but that we think they are possible directions to solve those big problems that we’ve been seeing with code review at GitLab.\n\nIf you visualize the communication tools as a spectrum, right now GitLab’s merge requests are similar to email: you’re sending \"emails,\" you’re receiving \"emails,\" and it’s not a real-time discussion, you’re not seeing what other people are typing, you don’t know when you’re going to get a response. On the other end of the spectrum is Google Docs: You see exactly what people are typing in real time, and expectations are more clear. Somewhere closer to Google Docs, you have Slack, which as a chat tool is very much focused on synchronous communication (although it can also be used for asynchronous communication). 
**We want to be in the middle of this communication tools spectrum, not like a Google Doc, always on, always real time, but we also don’t want to stay as an \"email client for collaborating on code.\"** We want to be smarter than that, enabling collaboration at the right moments.\n\nOne question that has been raised is **privacy**: People feel concerned about revealing when they’re looking at a merge request. Privacy is also a big concern in the sense of \"peer pressure\": people changing their behaviors because they know they can be observed by others. We have to find some middle ground between people who prefer to be more private and quietly observe things, and people who opt for maximum efficiency and prefer collaborating in real time when everyone is in the merge request. Another concern is how to **avoid interrupting** people's flow. We anticipate that these are some of the interesting challenges we will have to wrestle with and balance against helping teams work most efficiently.\n\nThese concerns are among the main reasons we are sharing our ideas. We realized that these are some of our blind spots and that there could be others, so **we need** [**your feedback**][feedback-issue]. There also might be other, even more amazing, crazy ideas that we haven’t thought of yet which would take merge requests to an exciting new place. **So don’t limit your feedback to the ideas that we’ve come up with; feel free instead to share ideas that you think would make the merge request an even better place to work and get your job done.**\n\nIf you’re interested in our macro strategy and plans for how we’re going to help you better manage, plan, and create in GitLab, take a look at our [Dev strategy](/blog/dev-strategy-review/).\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nHelp shape the future of code review - [Share your feedback][feedback-issue]\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nCover image by [Mitchell Luo](https://unsplash.com/@mitchel3uo) on [Unsplash](https://unsplash.com/photos/H3htK85wwnU)\n{: .note}\n\n[feedback-issue]: https://gitlab.com/gitlab-org/gitlab/issues/36119\n[suggest-changes]: https://docs.gitlab.com/ee/user/discussions/#suggest-changes\n",[1084,1347,1144],{"slug":5683,"featured":6,"template":678},"future-merge-requests-realtime-collab","content:en-us:blog:future-merge-requests-realtime-collab.yml","Future Merge Requests Realtime Collab","en-us/blog/future-merge-requests-realtime-collab.yml","en-us/blog/future-merge-requests-realtime-collab",{"_path":5689,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5690,"content":5696,"config":5701,"_id":5703,"_type":16,"title":5704,"_source":17,"_file":5705,"_stem":5706,"_extension":20},"/en-us/blog/gl-for-pm-prt-2",{"title":5691,"description":5692,"ogTitle":5691,"ogDescription":5692,"noIndex":6,"ogImage":5693,"ogUrl":5694,"ogSiteName":692,"ogType":693,"canonicalUrls":5694,"schema":5695},"2 Examples of how marketing uses GitLab to manage complex projects","How GitLab technology powers integrated marketing campaigns and product marketing projects.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680908/Blog/Hero%20Images/stickynotes.jpg","https://about.gitlab.com/blog/gl-for-pm-prt-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        
\"@type\": \"Article\",\n        \"headline\": \"2 Examples of how marketing uses GitLab to manage complex projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-12-11\",\n      }",{"title":5691,"description":5692,"authors":5697,"heroImage":5693,"date":5698,"body":5699,"category":14,"tags":5700},[3676],"2019-12-11","\n\n_In [part one of this series](/blog/gitlab-for-project-management-one/) we looked at the pervasive problems around collaboration and how GitLab was built to resolve those challenges both in and out of the software development space. In this second part we take a detailed look at how our marketing teams used GitLab for project management._\n\nWhen we jumped in to using GitLab for project management, we did it in a big way. The [Just Commit marketing campaign](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7) which launched in January 2019 is a good example of how the marketing team uses GitLab features like issues and epics.\n\n\"It was our first integrated campaign, and if you're not familiar with what that means, it's basically landing a single message across all channels,\" says [Jackie Gragnola](/company/team/#jgragnola), marketing programs manager. “So using social media, digital marketing, all of our content, our website. and in doing so, it was involving a lot of different team members.\"\n\nSince there were so many stakeholders involved, it was unrealistic that something like a Google Doc could provide the infrastructure necessary for efficient and transparent collaboration. Jackie migrated her kick-off document from Google Docs over to GitLab. \"It was the first test into using epics to give the high-level information and then organize the group into a single unified vision for what this campaign would become,\" she explains.\n\n![justcommit-integratedcampaign](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management2/justcommit_integratedcampaign.png){: .shadow.large.center}\nThe Just Commit integrated campaign epic included the JustCommit label, as well as campaign goals, personas the campaign is targeting, links to recorded meetings, and more.\n{: .note.text-center}\n\nThe Just Commit ancestor epic also included details such as [UTM tracking links](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7#utm-for-tracking-urls), a [list of teams and DRIs involved in the campaign](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7#teams-involved-roles-responsibilities), and a [timeline of key dates and deliverables](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7#key-timeline-dates) in the lead-up to the Feb. 18, 2019 launch.\n\nA level below the ancestor epic are child epics, which were organized by areas of action items. Some examples include organic search, webcasts, emails, and events; messaging and positioning, etc.\n\n![justcommit-child epics](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management2/jc-childepics.png){: .shadow.large.center}\nExamples of some of the child epics for the Just Commit integrated campaign.\n{: .note.text-center}\n\nThe Just Commit label that was created was tagged to issues related to the campaign. It is simple enough to get a high-level overview of what issues are related to the Just Commit campaign by searching for the label.\n\nIn order to dig deeper into the different categories of work, you’d look at the issue list within the different child epics. 
The issue list functions essentially as a list of what needs to get done, and provides a good overview of what’s left to accomplish on the list.\n\n![justcommit-issue list](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management2/jc-strategy-and-design.png){: .shadow.large.center}\nThis is an example of the issue list from the strategy and design child epic.\n{: .note.text-center}\n\nInside each issue is a DRI and a due date. The due dates were important not just to stay ahead of the deadline, but also because there were a lot of dependencies baked into the integrated campaign.\n\n\"We couldn't work on the content until we knew what the message was, and we couldn't work on anything related to digital marketing until we had the designs approved,\" says Jackie. \"So, this just kept us organized by saying what we needed to get done by what dates and kept us up-to-date on the timeline that would help us hit that delivery date.\"\n\nBy using GitLab features such as ancestor epics, child epics, issues, and labels, the Just Commit integrated campaign kept all stakeholders updated on their progress and accountable for their deliverables.\n\n## How product marketing uses GitLab\n\n[Tye Davis](/company/team/#davistye) is a technical marketing manager and he uses GitLab for managing product marketing projects.\n\n### Use issue boards to get a global overview of work\n\nTye works primarily within the [product marketing project](https://gitlab.com/gitlab-com/marketing/product-marketing), which is housed in the broader marketing group. Just like we saw in the Just Commit integrated campaign, there are various ancestor epics, child epics, and issues housed within this project.\n\nThe [issue board view](https://docs.gitlab.com/ee/user/project/issue_board.html) is a useful way to visualize and organize all the issues and activity happening within a specific group or project. Viewing an issue board is simple enough: Just select boards under the issues tab to see all of the issues within a specific group, or, to narrow the scope, select a specific project. But building one is another matter entirely.\n\nIt is important to think strategically about the level at which you build your issue board, because that will impact how much information is rolled up into the board.\n\n\"You have to think about where your work lies and where you should be building your issue boards in epics,\" says [JJ Cordz](/company/team/#jjcordz), senior marketing ops manager. \"As an example, in marketing ops we presently work across departments so we do a lot of work with sales ops, biz ops, sales in general, and all of those are individual projects and groups. So our issue board is actually built at this highest level (i.e., marketing group level) because we need to pull in everything else.\"\n\nBut not every team is as integrated as marketing ops. Sometimes building an issue board at the team level, instead of the group or project level, makes the most sense for your workflow.\n\nThe [technical marketing team has its own issue board](https://gitlab.com/gitlab-com/marketing/product-marketing/-/boards/926375?&label_name[]=tech-pmm), and it is sorted by labels. The labels it uses are uniform across the marketing group to indicate the status of a particular issue – `status: plan`, `status: WIP`, `status: scheduled`, or `status: review`. 
The labels automatically change when a particular issue is dragged between label lanes.\n\nThe use of these labels and the different team boards that live within the product marketing group allows anyone to take a look at the status of both individual issues and larger projects.\n\n### Team boards\n\nAnother option to configure an issue board is to base it on teams and sort by assignee. The team board view sorted by assignee allows you to see what each team member is working on.\n\n“We create boards based on assignee. This allows us to see who has what issue and what they're working on,\" says Tye. “Maybe your manager just wants to see what the team's working on or you're being a collaborative Agile team and want to just see what everyone's doing or what you could work on together.\"\n\n### Tracking progress\n\nThere are two main options for measuring work progress from a project management perspective: [milestones](https://docs.gitlab.com/ee/user/project/milestones/#project-milestones-and-group-milestones) and [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html).\n\nMilestones are time-bound and track work output based on a specific timeframe (e.g., Q1 FY20 – a four-month period). When creating an issue, you can assign it to a specific milestone.\n\nBurndown charts reflect all the issues that are completed within the specific milestone. Once the time period (e.g., Q1 FY20) is up, you move any remaining and new work over to the next milestone (e.g., Q2 FY20).\n\n### Relating to GitLab customers\n\nWhile the marketing team and other teams across the company use GitLab as a project management tool, the majority of our customers are engineers who use GitLab as an Agile planning tool for developing code.\n\nWe can still relate to our customers through our use of issues and merge requests to make changes to the handbook, publish blog posts, and perform other activities in different repositories within GitLab.\n\nWhether you’re an infrastructure engineer, product marketing manager, or even an editor for the GitLab blog, the GitLab product functions as a sophisticated and customizable project management tool where collaboration and efficiency are baked into the function and design.\n\nWatch the video from [GitLab Contribute](/events/gitlab-contribute/) in New Orleans to see an overview of how GitLab can be used for project management, plus more on using GitLab for integrated campaigns and product marketing.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tbg8KSyIWVg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Startaê Team](https://unsplash.com/@startaeteam) on [Unsplash](https://unsplash.com/s/photos/sticky-notes).\n{: .note}\n",[894,727,1347],{"slug":5702,"featured":6,"template":678},"gl-for-pm-prt-2","content:en-us:blog:gl-for-pm-prt-2.yml","Gl For Pm Prt 2","en-us/blog/gl-for-pm-prt-2.yml","en-us/blog/gl-for-pm-prt-2",{"_path":5708,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5709,"content":5714,"config":5719,"_id":5721,"_type":16,"title":5722,"_source":17,"_file":5723,"_stem":5724,"_extension":20},"/en-us/blog/gitlab-for-project-management-one",{"title":5710,"description":5711,"ogTitle":5710,"ogDescription":5711,"noIndex":6,"ogImage":5693,"ogUrl":5712,"ogSiteName":692,"ogType":693,"canonicalUrls":5712,"schema":5713},"How our tool fosters collaborative project 
management","Our marketing team explains how we use GitLab to manage complex projects. Read how GitLab can improve your collaboration on projects.","https://about.gitlab.com/blog/gitlab-for-project-management-one","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How our tool fosters collaborative project management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-12-06\",\n      }",{"title":5710,"description":5711,"authors":5715,"heroImage":5693,"date":5716,"body":5717,"category":14,"tags":5718},[3676],"2019-12-06","\n\n_While it is true that there are few non-technical roles left in today’s business environment, it is notable that even folks outside of engineering use GitLab technology for collaborative project management. In this first part of our two-part series we outline the problems of siloed communications and how GitLab is structured to solve that for developers and everyone else. In part two, we’ll take a deep dive into how we used GitLab to manage an integrated marketing campaign and how our product marketing team uses GitLab for complex project management._\n\nImagine you’re trying to launch a new, integrated campaign. This campaign has a central message (e.g., \"Everyone can contribute\") and it pulls in representatives from many different teams – like social media, blogs, and field marketing – to create the designs and content that make this campaign a reality. The campaign structure is built and you’re ready to go – but wait – you’re working in a silo where communication between teams is challenging and there are strict rules about how information is conveyed.\n\nMarketing programs manager [Jackie Gragnola](/company/team/#jgragnola) kicked off the “GitLab for Non-Tech & Project Management Use\" breakout session at [GitLab Contribute New Orleans](/events/gitlab-contribute/) with an icebreaker game that mirrors this very conundrum. Breakout group participants were assigned teams as they tried to rebuild a gumdrop structure, but with strict communication guidelines. One person could see the structure, and relay what the structure looks like to three runners, who then described the structure to one builder.\n\nNeedless to say, the inefficiencies mounted quickly.\n\n\"The problem was one person could use their eyes, one person could use their mouth, one person could use their ears,\" said [Joyce Tompsett](/company/team/#Tompsett), analyst relations manager at GitLab and an observer/reporter in this game. \"So, even though everybody had all the component pieces they were only allowed to use one function at a time and then there was no return communication allowed.\"\n\nThe “can’t see the whole picture” problem is a common one in every industry and the solution is to make collaboration painless. [Collaboration is one of our core values at GitLab](https://handbook.gitlab.com/handbook/values/#collaboration) and it is fundamental to how we run our business and how we designed our tool. To understand how GitLab can work outside of software development it’s helpful to understand the underpinnings.\n\n## How GitLab works\n\nDeveloping software is similar in concept to baking a layer cake. You need a really strong foundation to keep your cake upright, and each coating of frosting between the cake layers acts as the glue that holds it all together. 
The top layer of frosting makes sure that all of your layers stay in one place (and makes sure that the layer cake looks like a cake).\n\n![layercake](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management/layercakev2.jpg){: .shadow.medium.center}\nA layer cake is a great analogy for how GitLab works as a project management tool.\n{: .note.text-center}\n\n\"The frosting between those layers is like webhooks or APIs; they’re actually the integrations that make the two pieces of software talk to each other,\" explains [JJ Cordz](/company/team/#jjcordz), senior marketing ops manager. \"Each task that's above the next one can get more complex because it's building off the foundation that you've already put into place.\"\n\nThe difference between the typical DevOps layer cake and the GitLab layer cake is that every activity or function fulfilled by a different layer of the cake (i.e., discrete piece of software) happens entirely within GitLab. In the GitLab layer cake, everything from project planning to execution allows teams to collaborate together within a single tool.\n\nOur description of the GitLab layer cake is actually how GitLab is structured today: With groups at the top, followed by epics, and projects that have issues, templates, etc. All of the layers can work together to build a fluid workflow, or they can be used independently.\n\n\"So all of those pieces together can actually stand alone or you can put them all together and it makes a really awesome process in a workflow,\" says JJ. \"You can actually have lots of teams working together to get something massive done, but you've broken it down into little pieces.\"\n\n## Project management within GitLab\n\nIf you want to start thinking about getting \"something massive done\" within GitLab, consider these basic steps:\n\n*   **Create a framework**: Before diving into a new project, a good project manager will first define what the ideal state is and will then build a framework for achieving this ideal state.\n*   **Assign directly responsible individuals (DRIs)**: The PM will assign DRIs to different components of the project. Each DRI is responsible for that particular component and is the person you can follow up with regarding that component throughout the project.\n*   **Templatize repeated tasks**: Keep things efficient with templates.\n*   **Set service level agreements (SLAs) at each handoff point**: Think about the due date and work backward to sort out how long different tasks should take.\n*   **Write rules of engagement and fallback instructions**\n*   **Define the feedback process**: Ensure that you have a place for people to ask questions, and make room to iterate as you go along.\n\nWhat does this look like in the real world? Our marketing team built a project management structure within GitLab that allows multiple teams to collaborate within the [marketing group](https://gitlab.com/gitlab-com/marketing). Each team (e.g., [corporate marketing](https://gitlab.com/gitlab-com/marketing/corporate-marketing)) has its own project, where other groups and projects can live.\n\n[Epics](https://docs.gitlab.com/ee/user/group/epics/) – which represent projects that contain multiple issues – also live at the marketing group level rather than living within smaller team projects. The [epics live at the marketing group level](/handbook/marketing/#issues-milestones-and-epics) because oftentimes multiple marketing teams (e.g., corporate marketing, product marketing, etc.) 
will be tagged in different issues within a particular epic.\n\n[Efficiency](https://handbook.gitlab.com/handbook/values/#efficiency) is another one of our values at GitLab, and the marketing team created templates within different marketing teams for repeated tasks to keep processes more uniform and efficient.\n\nWe also created a unified, global view that allows us to track the progress of various marketing projects. We have four labels – work in progress (wip), plan, review, and scheduled – that are assigned to marketing issues to indicate their stage. The labels allow [Todd Barr](/company/team/#tbarr), our chief marketing officer, and anyone else on the marketing team to see a global overview of various issues within marketing as they move from idea to completion.\n\n![unifiedview](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management/labels.png){: .shadow.large.center}\nA global overview of all the activities happening in marketing, separated and labeled according to their current status.\n{: .note.text-center}\n\nThe marketing team uses two tiers for our epics: the highest level is the ancestor (formerly called \"parent\") epic, and below that is the child epic. There can be multiple issues associated with the child epic, but an issue can only be associated with one epic.\n\n![epic-diagram](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management/parent-child-epics.png){: .shadow.large.center}\nHow the marketing team uses ancestor epics and child epics.\n{: .note.text-center}\n\nNow that you understand the basics of GitLab and project management within GitLab, watch the video on executing sophisticated and integrated marketing programs.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tbg8KSyIWVg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd don’t miss the second part of this series where we put the spotlight on our internal successes using GitLab for project management.\n\nCover image by [Startaê Team](https://unsplash.com/@startaeteam) on [Unsplash](https://unsplash.com/s/photos/sticky-notes).\n{: .note}\n",[894,727,1347],{"slug":5720,"featured":6,"template":678},"gitlab-for-project-management-one","content:en-us:blog:gitlab-for-project-management-one.yml","Gitlab For Project Management One","en-us/blog/gitlab-for-project-management-one.yml","en-us/blog/gitlab-for-project-management-one",{"_path":5726,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5727,"content":5733,"config":5738,"_id":5740,"_type":16,"title":5741,"_source":17,"_file":5742,"_stem":5743,"_extension":20},"/en-us/blog/e-factor-productivity",{"title":5728,"description":5729,"ogTitle":5728,"ogDescription":5729,"noIndex":6,"ogImage":5730,"ogUrl":5731,"ogSiteName":692,"ogType":693,"canonicalUrls":5731,"schema":5732},"Improve your productivity by tracking your time and measuring your E-factor","Sharing my personal experience of how tracking my time while working remotely helped me be more productive.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673115/Blog/Hero%20Images/e-factor.jpg","https://about.gitlab.com/blog/e-factor-productivity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Improve your productivity by tracking your time and measuring your E-factor\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matej 
Latin\"}],\n        \"datePublished\": \"2019-11-26\",\n      }",{"title":5728,"description":5729,"authors":5734,"heroImage":5730,"date":5735,"body":5736,"category":14,"tags":5737},[4066],"2019-11-26","\nBack in the day, when I worked on-site and in open plan offices, I always felt unproductive despite being always busy. It was a paradox that I couldn’t understand. How come I’m rushing to do a lot of things all the time but still feel like I’m producing nothing that is truly valuable? Why do I get more work done in my “work from home day” that I only get every two weeks, than I do in the office?\n\nAfter joining GitLab and reading a couple of books on workplaces and productivity, I now understand why this was the case. Cal Newport’s [Deep Work](https://www.goodreads.com/book/show/25744928-deep-work) was the most illuminating book that I read on productivity. He breaks the types of work into two categories:\n\n**Shallow work**: *Non-cognitively demanding, logistical-style tasks, often performed while distracted. These efforts tend to not create much new value in the world and are easy to replicate.*\n\n**Deep work**: *The ability to focus, be uninterrupted for long stretches of time and fall into a [state of flow](https://en.wikipedia.org/wiki/Flow_(psychology)).*\n\nIn his **Deep Work Hypothesis**, he claims that the ability to focus separates the top performers from the rest:\n\n> The ability to perform deep work is becoming increasingly rare at exactly the same time it’s increasingly valuable in our economy. As a consequence, the few who cultivate this skill and then make it the core of their working life will thrive.\n\nWhile I was doing a lot of different things at the same time, it was mostly reactive work instead of valuable, [proactive](/handbook/product/ux/how-we-work/#proactive-and-reactive-ux) work. Replying to emails, attending meetings, chatting on Slack, and similar work demands a lot of energy but returns very little, if any, value. Taking this all into account, I decided to go back to working remotely because I knew [I could control my working environment better and be more productive](/blog/eliminating-distractions-and-getting-things-done/). That’s why I ended up joining GitLab.\n\n## The E-factor\n\n*Peopleware* by Tom DeMarco and Timothy Lister is another book that is popular with GitLab team members. In the book, the authors introduce a concept called *the E-factor*. To put it simply, the E-factor is about measuring brain time *versus* body time – so how much time a person is working at their full potential *versus* how much time they’re present at the office. The formula to calculate it is the following:\n\n> E-factor = uninterrupted hours / body-present hours\n\nSo when I worked in open plan offices, I was present for about eight hours, but had a maximum of about one or two hours of uninterrupted time. That means that my E-factor ranged from *0.125 to 0.25*. It’s impossible to produce valuable work with such a low E-factor. Switching to working remotely at an all-remote company immediately improved this but I recently decided to take it even further. I measured how I spent my time for two weeks while working at GitLab. The first week was to document how I had already been spending my time and then the second week with the introduction of improvements that would increase my uninterrupted time. 
Research suggests that intense concentration is only possible for up to four hours per day, so I was aiming to get to four hours of uninterrupted time altogether, but ideally in a single block. Here’s how I spent time before the improvements:\n\n![My week before improvements](https://about.gitlab.com/images/blogimages/before-improvements.jpg){: .large.center}\n\nI tracked my time by dividing days into 15-minute blocks. Light grey is sleep, light blue is family time, and dark blue is work time. Red colors are for shallow work, meetings and email time. The more dark blue blocks, and the more connected they are, the better.\n{: .note.text-center}\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nGet the [Google Spreadsheet template for tracking time](https://docs.google.com/spreadsheets/d/10CnZlCW0fu-GXqGhK7Lysj5QzTGIqYjdv6yrUlbARzo/edit?usp=sharing). Go to *File* > *Make a copy* to get an editable version.\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nBefore introducing the improvements, this is how my usual day looked:\n\n* I checked my email first thing in the morning, which could cause me to spend up to one hour just replying to other people.\n* I used to study a book or take course lessons in the morning as a part of professional self-improvement. This was usually half an hour. By the time I actually started working it’d be 9:30.\n* I’d work for a couple of hours and stop for a quick snack at 11:30. This was the first stretch of uninterrupted time.\n* After the snack I’d have another similar stretch of time but that was usually just an hour (mostly because I’d get distracted with shallow work).\n\nSo if I put all this together, I had about three hours of uninterrupted time every day. It’s not that bad (and it’s definitely better than what I experienced in on-site roles in the past) but I wanted to do better. I especially wanted to increase the amount of uninterrupted time in a single stretch. So I decided to make the following improvements:\n\n* I started checking my email in the afternoon, after lunch (that’s 3pm for me).\n* I moved the self-improvement activities to after the first snack at 11:30am.\n* I realized I spent an hour and a half showering and eating breakfast in the morning, which was way too much. I reduced this to one hour so I could start working 30 minutes earlier (8am instead of 8:30am).\n\n![My week after improvements](https://about.gitlab.com/images/blogimages/after-improvements.jpg){: .large.center}\nA lot more dark blue, and a lot more connected dark blue blocks after improvements.\n{: .note.text-center}\n\nWith these improvements, I was able to increase the first stretch of uninterrupted time from two hours to three and a half hours. With an additional one to two hours of uninterrupted time after the snack, that can sum up to four and a half to five and a half hours of uninterrupted time each day. My E-factor increased to *0.6875* – that’s **2.75 times** my best E-factor in the office! These changes to my workflow help me perform deep work and fall into a state of flow twice a day, and I noticed drastic improvements in my productivity and in my psychological state as well.\n\n## Things that enabled me to introduce these improvements\n\n### Separate room for work\n\nI have a study at home where I can be alone and focus. 
I think this is a very important thing for all remote workers.\n\n### Strong working routine\n\nAt GitLab, working remotely and asynchronously gives us the [freedom to shape our working schedule as we please](https://handbook.gitlab.com/handbook/values/#managers-of-one), but a strong working routine has lots of benefits. Starting work at the same time in the morning helps create more uninterrupted time and boosts productivity.\n\n### Timezone\n\nI’m based in Europe and most of my colleagues are based in the U.S. This means that I can easily block out time for focused work and eliminate all distractions, including Slack.\n\n### My Slack and email policy\n\nEven when I’m not in my focus time, [I have Slack notifications disabled](https://handbook.gitlab.com/handbook/values/#bias-towards-asynchronous-communication). I even disabled the small red dots on the app icon in the dock so that nothing has the possibility of distracting me. As for email, I’ll only check my inbox after lunch – well after I’ve had my two blocks of uninterrupted time.\n\n### Writing down tasks\n\nI always write down the things that I need to work on. I have a small notebook on my desk and at the end of each day, I write down the things I need to work on the next day. This way, I can go straight to work in the morning.\n\n### Keeping a journal of tasks\n\nRecently, I also started keeping track of all the things I need to work on in my “tasks journal”. It’s just a project on GitLab where I keep a couple of Markdown files for current tasks that I’m working on and an archive of tasks that I worked on in the past. They’re all divided by weeks. For example, at the time of writing this paragraph, it’s week 33 of this year, so my [current tasks](https://gitlab.com/matejlatin/focus/blob/master/Tasks/current.md) are things that I want to work on this week. At the end of the week, I’ll check the progress and [archive it](https://gitlab.com/matejlatin/focus/tree/master/Tasks) so I can always check back later.\n\nKeeping a task journal adds a stronger sense of continuity and sharp focus to my work. In the spirit of [transparency](https://handbook.gitlab.com/handbook/values/#public-by-default), I share this publicly with all my co-workers so everyone can see what I’m working on and check my availability.\n\n### Working asynchronously\n\nOne of the greatest benefits of working at GitLab is [being encouraged to work asynchronously](/handbook/communication/). Because our team isn't tied to the same working hours, I can block out time for focus without feeling guilty that I’m not available to everyone all the time. It’s interesting how working like this makes you realize that most interruptions aren’t as urgent as we tend to believe.\n\n## Advice for non-remote workers\n\nIf you’re required to work in an office – possibly a working environment full of distractions – implementing these strategies can be a lot more challenging. My advice for non-remote workers is to ask your manager for “work from home” days. Maybe start with one day per week and see how it goes. If your manager doesn't agree, try tracking your time when you work in the office like I did. Present the chart to them and tell them about deep work and the E-factor. Explain to your manager that you want to increase your uninterrupted time, which will help you complete more valuable work. 
Tell them how working from home will help you achieve this, and how you will change your workflow to be more productive (look for inspiration in the improvements I described in this article). Be committed to producing more meaningful work and be clear that working from home is only a means to an end. Offer to track your time at home to compare with the time you spend in the office, especially if your manager doesn’t seem to be in favor of these changes.\n\nIf working from home is still not an option, consider finding a quiet spot in the office where you’ll be uninterrupted: Perhaps the lounge, the garden, or even the reception area. Try moving to an area away from your teammates and sit with people you don’t know as well. They’re much less likely to disturb you. When I was working from a busy office in central London, I loved going to a coffee shop for an hour or two. I managed to get some work done and enjoyed the short trip to the shop and back. The walk and getting out of the office helped me relax a bit as well.\n\nThese changes to how we work are all about improving productivity and quality of work. In an ideal working environment, everyone would measure their E-factors and they’d brag about their uninterrupted time instead of complaining about how many meetings they have to attend in an effort to perform busyness for their colleagues.\n\nPhoto by [Émile Perron](https://unsplash.com/@emilep?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/productivity?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[915,1144,959],{"slug":5739,"featured":6,"template":678},"e-factor-productivity","content:en-us:blog:e-factor-productivity.yml","E Factor Productivity","en-us/blog/e-factor-productivity.yml","en-us/blog/e-factor-productivity",{"_path":5745,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5746,"content":5751,"config":5757,"_id":5759,"_type":16,"title":5760,"_source":17,"_file":5761,"_stem":5762,"_extension":20},"/en-us/blog/open-sourcing-the-gitter-mobile-apps",{"title":5747,"description":5748,"ogTitle":5747,"ogDescription":5748,"noIndex":6,"ogImage":4351,"ogUrl":5749,"ogSiteName":692,"ogType":693,"canonicalUrls":5749,"schema":5750},"Open-sourcing the Gitter mobile apps","Learn how we open sourced the Android and iOS Gitter apps.","https://about.gitlab.com/blog/open-sourcing-the-gitter-mobile-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Open-sourcing the Gitter mobile apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Eastwood\"}],\n        \"datePublished\": \"2019-11-22\",\n      }",{"title":5747,"description":5748,"authors":5752,"heroImage":4351,"date":5754,"body":5755,"category":14,"tags":5756},[5753],"Eric Eastwood","2019-11-22","\nBefore we acquired Gitter, almost every part of Gitter was private/closed-source. The main [webapp](https://gitlab.com/gitlab-org/gitter/webapp) was open-sourced in June 2017, and both mobile [Android](https://gitlab.com/gitlab-org/gitter/gitter-android-app)/[iOS](https://gitlab.com/gitlab-org/gitter/gitter-ios-app) apps were open sourced in September 2018. If you would like to come help out, feel free to send us a merge request! This blog post will go over some of the technical details of making the projects available for anyone to contribute.\n\nHere is the basic overview:\n\n1.  
Find secrets in the current state of the project (don't worry about the commit history) and move them to some config that isn't tracked in the repo.\n1.  Find/remove secrets throughout the whole repo commit history.\n1.  Make the project public 🎉\n1.  Caveats:\n    - Because we are rewriting the git history, I don't know of a way to keep merge requests/pull requests, since the MRs reference the old commit hashes.\n\nQuick navigation:\n\n- [Jump to open sourcing Android](#android)\n- [Jump to open sourcing iOS](#ios)\n\n## Android\n\nIf you want to see the full project and final result, you can check out the [project on GitLab](https://gitlab.com/gitlab-org/gitter/gitter-android-app) ([open-sourced 2018-8-8](https://twitter.com/gitchat/status/1027293167471812611)).\n\nTo start out, we used the [GitHub to GitLab project import](https://docs.gitlab.com/ee/user/project/import/github.html) to move the private GitHub project over to GitLab. We named it `gitter-android-app2` so that later on we could create the actual clean public project without any of the orphaned git references that may potentially leak.\n\n### Finding secrets\n\n[`truffleHog`](https://github.com/dxa4481/truffleHog) will search for high entropy strings (like tokens/passwords) through the entire git repo history. It's also useful to find all the potential areas where secrets may still exist in the current state of the project. Some sticky points we encountered while using it include:\n\n- \"I wish we could just search the current state of the project instead of all git history (the `--max_depth=2` argument will just make it search the diff of the latest commit)\" [dxa4481/truffleHog#92](https://github.com/dxa4481/truffleHog/issues/92).\n- \"The output will show the entire diff for the triggered commit which is a bit burdensome to see exactly what is wrong. The JSON output `--json` is sometimes easier to understand\" [https://github.com/dxa4481/truffleHog/issues/58](https://github.com/dxa4481/truffleHog/issues/58) or [dxa4481/truffleHog#102](https://github.com/dxa4481/truffleHog/issues/102).\n\n### Moving secrets to untracked config\n\nOnce we figured out where all of the secrets were, we needed a config/variable solution that isn't tracked by git but still makes them available when building. We also wanted the solution to work in GitLab CI for some sanity builds/testing. There are lots of good articles on this topic:\n\n- [Remove private signing information from your project](https://developer.android.com/studio/build/gradle-tips#remove-private-signing-information-from-your-project)\n- [Keeping Your Android Project’s Secrets Secret](https://medium.com/@geocohn/keeping-your-android-projects-secrets-secret-393b8855765d)\n- [Hiding Secrets in Android Apps](https://rammic.github.io/2015/07/28/hiding-secrets-in-android-apps/)\n- [Keeping secrets in an Android Application](https://joshmcarthur.com/2014/02/16/keeping-secrets-in-an-android-application.html)\n- [Android: Loading API Keys and other secrets from properties file using gradle](https://gist.github.com/curioustechizen/9f7d745f9f5f51355bd6)\n- [How can I keep API keys out of source control?](https://arstechnica.com/information-technology/2013/12/how-can-i-keep-api-keys-out-of-source-control/)\n\nOur solution is completely based on the information in these articles. We chose to go the route of defining things in a `secrets.properties` file, which can easily be read in the Gradle build script that handles the build even when using Android Studio. 
If the `secrets.properties` file doesn't exist (like in CI), it will try to read the secrets from [environment variables which can easily be supplied in the project settings](https://docs.gitlab.com/ee/ci/variables/).\n\n`secrets.properties`\n\n```properties\n# Visit https://developer.gitter.im/apps (sign in) and create a new app\n# Name: my-gitter-android-app (can be anything)\n# Redirect URL: https://gitter.im/login/oauth/callback\noauth_client_id=\"...\"\noauth_client_secret=\"...\"\noauth_redirect_uri=\"https://gitter.im/login/oauth/callback\"\n```\n\n`build.gradle`\n\n```gradle\napply plugin: 'com.android.application'\n\n// Try reading secrets from file\ndef secretsPropertiesFile = rootProject.file(\"secrets.properties\")\ndef secretProperties = new Properties()\nif (secretsPropertiesFile.exists()) {\n    secretProperties.load(new FileInputStream(secretsPropertiesFile))\n}\n// Otherwise read from environment variables, this happens in CI\nelse {\n    secretProperties.setProperty(\"oauth_client_id\", \"\\\"${System.getenv('oauth_client_id')}\\\"\")\n    secretProperties.setProperty(\"oauth_client_secret\", \"\\\"${System.getenv('oauth_client_secret')}\\\"\")\n    secretProperties.setProperty(\"oauth_redirect_uri\", \"\\\"${System.getenv('oauth_redirect_uri')}\\\"\")\n}\n\nandroid {\n    ...\n\n    defaultConfig {\n        ...\n\n        buildConfigField(\"String\", \"oauth_client_id\", \"${secretProperties['oauth_client_id']}\")\n        buildConfigField(\"String\", \"oauth_client_secret\", \"${secretProperties['oauth_client_secret']}\")\n        buildConfigField(\"String\", \"oauth_redirect_uri\", \"${secretProperties['oauth_redirect_uri']}\")\n    }\n    ...\n}\n```\n\nUse the config variables in the Java app:\n\n```java\nimport im.gitter.gitter.BuildConfig;\n\n// The buildConfigField entries above become String constants on BuildConfig\nString clientId = BuildConfig.oauth_client_id;\nString clientSecret = BuildConfig.oauth_client_secret;\nString redirectUri = BuildConfig.oauth_redirect_uri;\n```\n\n#### Removing compiled assets\n\nWe use a `WebView` to display the HTML markdown messages in the chat room. This view uses assets built from the main [`webapp` project](https://gitlab.com/gitlab-org/gitter/webapp). Because these assets had some inlined production [`webapp`](https://gitlab.com/gitlab-org/gitter/webapp) secrets, that whole directory needed to be removed.\n\nInitially, we opted to have the developer build these assets with their own secrets and symlink the build output directory. The [community made this even simpler](https://gitlab.com/gitlab-org/gitter/gitter-android-app/merge_requests/113), so now there is just a Gradle task to run which fetches the latest build we have available from the `webapp` GitLab CI.\n\n### Removing secrets from the repo history\n\nFrom your `truffleHog` results earlier, you should know where secrets were stored throughout the history. We can use [BFG Repo-Cleaner](https://rtyley.github.io/bfg-repo-cleaner/) to remove and rewrite the repo history quickly.\n\nWhen using BFG, I just wanted to rewrite all of the sensitive values in `app/src/main/res/values/settings.xml` instead of completely removing them, but rewriting isn't an option with BFG, so I went ahead with deleting the file and recreating it in a commit afterwards. 
🤷\n\nFor the Android app, here are the BFG commands I used:\n\n- Remove `app/src/main/assets/www/`\n  - `java -jar \"bfg.jar\" --delete-folders www`\n- Remove `app/src/main/res/values/settings.xml`\n  - `java -jar \"bfg.jar\" --delete-files settings.xml`\n- Remove sensitive strings where we can't just remove the whole file (collected from `truffleHog` results)\n  - `java -jar \"bfg.jar\" --replace-text \"gitter-android-bad-words.txt\"`\n\nAfter you think you removed all the secrets, it's best to run `truffleHog` again just to make sure no secrets are left over. 😉\n\n### Make it public\n\nNow it's time to update your `readme` with some setup instructions so the community knows how to contribute.\n\nThis is the scary part 😅. Go to **Project settings** > **General** > **Permissions** > set **Project visibility** as **Public**. You can [read more about project access here](https://docs.gitlab.com/ee/public_access/public_access.html).\n\nCurious about how to set up builds in GitLab CI? [Learn more from this blog post](/blog/setting-up-gitlab-ci-for-android-projects/), which was what we used to set it up for our projects.\n\nYou can even learn how we [automated the release process so we can publish straight to the Google Play Store from GitLab CI via fastlane 🚀](/blog/android-publishing-with-gitlab-and-fastlane/).\n\n## iOS\n\nIf you want to see the full project and final result, you can check out the [project on GitLab](https://gitlab.com/gitlab-org/gitter/gitter-ios-app) ([open-sourced 2018-9-18](https://twitter.com/gitchat/status/1041795909103898625)).\n\nThe same concepts apply from the Android section. We created a separate private project, `gitter-ios-app2`, where we could work; later on, we created the actual clean public project (`gitter-ios-app`) without any of the orphaned git references that could leak.\n\n### Finding secrets\n\n`truffleHog` didn't work well in the iOS project because there were a bunch of generated Xcode files that had file hashes (high entropy strings, which truffleHog looks for) – which meant every commit was listed. 🤦‍ Instead of trying to find something to filter the results down or getting another tool, I decided to just search manually. Here is the list of things we looked for:\n\n- `token`\n- `secret`\n- `key`\n- `cert`\n- `api`\n- `pw`\n- `password`\n\nI used this directory filter when searching (`Ctrl + F`) for the strings above to avoid finding things outside of the repo itself (copy-paste for the Atom editor): `!Common/,!Libraries,!Gitter/www,!Pods/,!xctool`\n\n### Moving secrets to untracked config\n\nThe iOS app uses a few git sub-modules, which we also had to check for secrets before making them public. 
It turned out only one of the sub-modules – [`troupeobjccommon`](https://gitlab.com/gitlab-org/gitter/troupeobjccommon) – had secrets of its own, so I ran through the same secret removal process.\n\nWe had the same OAuth secrets in the main part of the iOS app, but since `troupeobjccommon` was also trying to handle OAuth secret settings, we opted for putting the new logic in `troupeobjccommon` to avoid having to refactor any other downstream code that uses the same submodule (like the macOS desktop app).\n\nHere are some articles around handling secrets in an iOS project:\n\n- [Secret variables in Xcode AND your CI for fun and profit 💌](https://medium.com/flawless-app-stories/secret-variables-in-xcode-and-your-ci-for-fun-and-profit-d387a50475d7)\n- [Secrets Management in iOS Applications](https://medium.com/@jules2689/secrets-management-in-ios-applications-52795c254ec1)\n\nSince iOS apps can only be built on macOS and we don't have any macOS GitLab CI runners, our solution doesn't have to be CI compatible. You can track [this issue for shared macOS GitLab CI runners](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5720).\n\n`Gitter/GitterSecrets-Dev.plist`\n\n```xml\n\u003C?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\u003C!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n\u003Cplist version=\"1.0\">\n\u003Cdict>\n  \u003C!--\n  Visit https://developer.gitter.im/apps (sign in) and create a new app\n  Name: my-gitter-ios-app (can be anything)\n  Redirect URL: https://gitter.im/login/oauth/callback\n  -->\n  \u003Ckey>OAuthClientId\u003C/key>\n  \u003Cstring>\u003C/string>\n  \u003Ckey>OAuthClientSecret\u003C/key>\n  \u003Cstring>\u003C/string>\n  \u003Ckey>OAuthCallback\u003C/key>\n  \u003Cstring>https://gitter.im/login/oauth/callback\u003C/string>\n\u003C/dict>\n\u003C/plist>\n```\n\n[`troupeobjccommon`](https://gitlab.com/gitlab-org/gitter/troupeobjccommon) is in Objective-C\n\n`TRAppSettings.h`\n\n```h\n#import \u003CFoundation/Foundation.h>\n\n@interface TRAppSettings : NSObject\n\n+ (TRAppSettings *) sharedInstance;\n\n- (NSString *) clientID;\n\n- (NSString *) clientSecret;\n\n- (NSString *) oauthScope;\n\n@end\n```\n\n`TRAppSettings.m`\n\n```objc\n@interface TRAppSettings ()\n\n@property (strong, nonatomic) NSUserDefaults *secrets;\n\n@end\n\nstatic TRAppSettings *sharedAppSettingsSingleton;\n\n@implementation TRAppSettings {\n    int firstRunPostUpdate;\n}\n\n+ (void)initialize\n{\n    static BOOL initialized = NO;\n    if(!initialized)\n    {\n        initialized = YES;\n        sharedAppSettingsSingleton = [[TRAppSettings alloc] init];\n    }\n\n    NSLog(@\"Pulling secrets from SECRETS_PLIST = %@.plist\", SECRETS_PLIST);\n}\n\n+ (TRAppSettings *) sharedInstance\n{\n    return sharedAppSettingsSingleton;\n}\n\n- (id)init {\n    self = [super init];\n    if (self == nil) {\n        return nil;\n    }\n    // SECRETS_PLIST is a compile-time define naming the plist to load\n    NSString *troupeSecretsPath = [[NSBundle mainBundle] pathForResource:SECRETS_PLIST ofType:@\"plist\"];\n    if(troupeSecretsPath == nil) {\n        NSString *failureReason = [NSString stringWithFormat:@\"Gitter secrets file not found in bundle: %@.plist. 
You probably need to add it to the `Gitter/Supporting Files` in Xcode navigator\", SECRETS_PLIST];\n        NSException* exception = [NSException\n            exceptionWithName:@\"FileNotFoundException\"\n            reason:failureReason\n            userInfo:nil];\n\n        NSLog(@\"%@\", failureReason);\n\n        [exception raise];\n    }\n    NSDictionary *troupeSecrets = [NSDictionary dictionaryWithContentsOfFile:troupeSecretsPath];\n\n    self.secrets = [NSUserDefaults standardUserDefaults];\n    [self.secrets registerDefaults:troupeSecrets];\n\n    return self;\n}\n\n- (NSString *) clientID {\n    return [self.secrets stringForKey:@\"OAuthClientId\"];\n}\n\n- (NSString *) clientSecret {\n    return [self.secrets stringForKey:@\"OAuthClientSecret\"];\n}\n\n- (NSString *)oauthScope {\n    return [self.secrets stringForKey:@\"OAuthCallback\"];\n}\n```\n\nUsage in the Swift app:\n\n```swift\nprivate let appSettings = TRAppSettings.sharedInstance()\n\nlet clientId = appSettings!.clientID()\nlet clientSecret = appSettings!.clientSecret()\nlet oauthScope = appSettings!.oauthScope()\n```\n\n### Adding in GitLab CI\n\nIf you're interested in setting up automated builds and publishing releases to the Apple App Store from GitLab CI, you can learn how in this [blog post about using fastlane](/blog/ios-publishing-with-gitlab-and-fastlane/).\n\n### Removing secrets from the repo history\n\nWe didn't have a complete picture of what to remove because `truffleHog` didn't work well, so we didn't use BFG Repo-Cleaner. To remove secrets from the git repo history, we just squashed all of the history into a single commit.\n\n## Life after open sourcing apps\n\nWe have some [thoughts about deprecating the Android/iOS apps](https://gitlab.com/gitlab-org/gitter/webapp/issues/2281), but the community has been great about keeping the apps alive so far. We released a couple of versions of each app, including [dark theme](https://gitlab.com/gitlab-org/gitter/gitter-android-app/merge_requests/2) and [GitLab sign-in](https://gitlab.com/gitlab-org/gitter/gitter-android-app/merge_requests/112) for Android, and a bunch of technical debt fixes for iOS, including removing the deprecated [`SlackTextViewController`](https://gitlab.com/gitlab-org/gitter/gitter-ios-app/merge_requests/8) (and we are intensely working on incorporating the new [`SlackWysiwygInputController`](https://goo.gl/7NDM3x) 😜).\n\nThe [Android](https://gitlab.com/gitlab-org/gitter/gitter-android-app)/[iOS](https://gitlab.com/gitlab-org/gitter/gitter-ios-app) apps could benefit from a lot of polish and fixes, so if you see anything particularly annoying, we would love to review and merge your updates!\n\nCover image by [Nate Johnston](https://unsplash.com/@natejohnston) on [Unsplash](https://unsplash.com/photos/DkCydKeaLV8).\n{: .note}\n",[703,1347,1445,110],{"slug":5758,"featured":6,"template":678},"open-sourcing-the-gitter-mobile-apps","content:en-us:blog:open-sourcing-the-gitter-mobile-apps.yml","Open Sourcing The Gitter Mobile Apps","en-us/blog/open-sourcing-the-gitter-mobile-apps.yml","en-us/blog/open-sourcing-the-gitter-mobile-apps",{"_path":5764,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5765,"content":5771,"config":5777,"_id":5779,"_type":16,"title":5780,"_source":17,"_file":5781,"_stem":5782,"_extension":20},"/en-us/blog/tracking-down-missing-tcp-keepalives",{"title":5766,"description":5767,"ogTitle":5766,"ogDescription":5767,"noIndex":6,"ogImage":5768,"ogUrl":5769,"ogSiteName":692,"ogType":693,"canonicalUrls":5769,"schema":5770},"Tracking TCP Keepalives: Lessons in Docker, Golang & GitLab","An in-depth recap of debugging a 
bug in the Docker client library.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680874/Blog/Hero%20Images/network.jpg","https://about.gitlab.com/blog/tracking-down-missing-tcp-keepalives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2019-11-15\",\n      }",{"title":5772,"description":5767,"authors":5773,"heroImage":5768,"date":5774,"body":5775,"category":14,"tags":5776},"What tracking down missing TCP Keepalives taught me about Docker, Golang, and GitLab",[670],"2019-11-15","\n\nThis blog post was originally published on the GitLab Unfiltered blog. It was reviewed and republished on 2019-12-03.\n{: .alert .alert-info .note}\n\nWhat began as a failure in a GitLab static analysis check led to a\ndizzying investigation that uncovered a subtle [bug in the Docker client\nlibrary code](https://github.com/docker/for-linux/issues/853) used by\nthe GitLab Runner. We ultimately worked around the problem by upgrading\nthe Go compiler, but in the process we uncovered an unexpected change in\nthe Go TCP keepalive defaults that fixed an issue with Docker and GitLab\nCI.\n\nThis investigation started on October 23, when backend engineer [Luke\nDuncalfe](/company/team/#.luke) mentioned, \"I'm seeing\n[`static-analysis` failures with no output](https://gitlab.com/gitlab-org/gitlab/-/jobs/331174397).\nIs there something wrong with this job?\" He opened [a GitLab\nissue](https://gitlab.com/gitlab-org/gitlab/issues/34951) to discuss.\n\nWhen Luke ran the static analysis check locally on his laptop, he saw\nuseful debugging output when the test failed. For example, an extraneous\nnewline would accurately be reported by Rubocop. However, when the same\ntest ran in GitLab's automated test infrastructure, the test failed\nquietly:\n\n![Failed job](https://about.gitlab.com/images/blogimages/docker-tcp-keepalive-debug/job-failure.png){: .shadow.center}\n\nNotice how the job log did not include any clues after the `bin/rake\nlint:all` step. This made it difficult to determine whether a real\nproblem existed, or whether this was just a flaky test.\n\nIn the ensuing days, numerous team members reported the same problem.\nNothing kills productivity like silent test failures.\n\n## Was something wrong with the test itself?\n\nIn the past, we had seen that if that specific test generated enough\nerrors, [the output buffer would fill up, and the continuous integration\n(CI) job would lock\nindefinitely](https://gitlab.com/gitlab-org/gitlab-foss/issues/61432). We\nthought we had [fixed that issue months\nago](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/28402). Upon\nfurther review, that fix seemed to eliminate any chance of a thread\ndeadlock.\n\nDid we have to flush the buffer? No, because the Linux kernel will do\nthat for an exiting process already.\n\n## Was there a change in how CI logs were handled?\n\nWhen a test runs in GitLab CI, the [GitLab\nRunner](https://gitlab.com/gitlab-org/gitlab-runner/) launches a Docker\ncontainer that runs commands specified by a `.gitlab-ci.yml` inside the\nproject repository. As the job runs, the runner streams the output to\nthe GitLab API via PATCH requests. The GitLab backend saves this data\ninto a file. 
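\n\nStripped of retries and error handling, that trace upload can be\nsketched roughly like this. This is an illustration, not the runner's\nactual code (which is written in Go), and the job ID, token header, and\nContent-Range handling here are assumptions:\n\n```python\nimport requests\n\nGITLAB_URL = \"https://gitlab.example.com\"  # hypothetical instance\nJOB_ID = 12345                             # hypothetical job ID\n\ndef send_log_chunk(chunk: bytes, offset: int, job_token: str) -> int:\n    # Append one chunk of the job log and return the next offset.\n    response = requests.patch(\n        f\"{GITLAB_URL}/api/v4/job/{JOB_ID}/trace\",\n        data=chunk,\n        headers={\n            \"JOB-TOKEN\": job_token,\n            # Byte range of this chunk within the full log\n            \"Content-Range\": f\"{offset}-{offset + len(chunk) - 1}\",\n        },\n    )\n    response.raise_for_status()  # expect 202 Accepted\n    return offset + len(chunk)\n```\n\n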
The following sequence diagram shows how this works:\n\n```plantuml\n== Get a job! ==\nRunner -> GitLab: POST /api/v4/jobs/request\nGitLab -> Runner: 201 Job was scheduled\n\n== Job sends logs (1 of 2) ==\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> File: Save to disk\nGitLab -> Runner: 202 Accepted\n\n== Job sends logs (2 of 2) ==\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> File: Save to disk\nGitLab -> Runner: 202 Accepted\n```\n\n[Heinrich Lee Yu](/company/team/#engwan) mentioned\nthat we had recently [disabled a feature flag that changed how GitLab\nhandled CI job\nlogs](https://docs.gitlab.com/ee/administration/job_logs.html#new-incremental-logging-architecture). [The\ntiming seemed to line\nup](https://gitlab.com/gitlab-org/gitlab/issues/34951#note_236723888).\n\nThis feature, called live CI traces, eliminates the need for a shared\nPOSIX filesystem (e.g., NFS) when saving job logs to disk by:\n\n1. Streaming data into memory via Redis\n2. Persisting the data in the database (PostgreSQL)\n3. Archiving the final data into object storage\n\nWhen this flag is enabled, the flow of CI job logs looks something like\nthe following:\n\n```plantuml\n== Get a job! ==\nRunner -> GitLab: POST /api/v4/jobs/request\nGitLab -> Runner: 201 Job was scheduled\n\n== Job sends logs ==\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> Redis: Save chunk\nGitLab -> Runner: 202 Accepted\n...\n== Copy 128 KB chunks from Redis to database ==\nGitLab -> Redis: GET gitlab:ci:trace:id:chunks:0\nGitLab -> PostgreSQL: INSERT INTO ci_build_trace_chunks\n...\n== Job finishes ==\n\nRunner -> GitLab: PUT /api/v4/job/:id\nGitLab -> Runner: 200 Job was updated\n\n== Archive trace to object storage ==\n```\n\nLooking at the flow diagram above, we see that this approach has more\nsteps. After receiving data from the runner, something could have gone\nwrong with handling a chunk of data. However, we still had many\nquestions:\n\n1. Did the runners send the right data in the first place?\n1. Did GitLab drop a chunk of data somewhere?\n1. Did this new feature actually have anything to do with the problem?\n1. Are they really making another Gremlins movie?\n\n## Reproducing the bug: Simplify the `.gitlab-ci.yml`\n\nTo help answer those questions, we simplified the `.gitlab-ci.yml` to\nrun only the `static-analysis` step. We inserted a known Rubocop error,\nreplacing an `eq` with `eql`. We first ran this test on a separate GitLab\ninstance with a private runner. No luck there – the job showed the right\noutput:\n\n```\nOffenses:\n\nee/spec/models/project_spec.rb:55:42: C: RSpec/BeEql: Prefer be over eql.\n        expect(described_class.count).to eql(2)\n                                         ^^^\n\n12669 files inspected, 1 offense detected\n```\n\nHowever, we repeated the test on our staging server and found that we\nreproduced the original problem. In addition, the live CI trace feature\nflag had been activated on staging. Since the problem occurred with and\nwithout the feature, we could eliminate that feature as a possible\ncause.\n\nPerhaps something with the GitLab server environment caused a\nproblem. For example, could the load balancers be rate-limiting the\nrunners? As an experiment, we pointed a private runner at the staging\nserver and re-ran the test. This time, it succeeded: the output was\nshown. That seemed to suggest that the problem had more to do with the\nrunner than with the server.\n\n## Docker Machine vs. 
Docker\n\nOne key difference between the two tests: One runner used a shared,\nautoscaled runner using a [Docker\nMachine](https://docs.docker.com/machine/overview/) executor, and the\nprivate runner used a [Docker\nexecutor](https://docs.gitlab.com/runner/executors/docker.html).\n\nWhat does Docker Machine do exactly? The following diagram may help\nillustrate:\n\n![Docker Machine](https://docs.docker.com/machine/img/machine.png){: .medium.center}\n\nThe top-left shows a local Docker instance. When you run Docker from the\ncommand-line interface (e.g., `docker attach my-container`), the program\njust makes [REST calls to the Docker Engine\nAPI](https://docs.docker.com/engine/api/v1.40/).\n\nThe rest of the diagram shows how Docker Machine fits into the\npicture. Docker Machine is an entirely separate program. The GitLab\nRunner shells out to `docker-machine` to create and destroy virtual\nmachines using cloud-specific (e.g. Amazon, Google, etc.) drivers. Once\na machine is running, the runner then uses the Docker Engine API to run,\nwatch, and stop containers.\n\nNote that this API is used securely over an HTTPS connection. This is an\nimportant difference between the Docker Machine executor and Docker\nexecutor: The former needs to communicate across the network, while the\nlatter can either use a local TCP socket or UNIX domain socket.\n\n## Google Cloud Platform timeouts\n\nWe've known for a while that Google Cloud [has a 10-minute idle\ntimeout](https://cloud.google.com/compute/docs/troubleshooting/general-tips),\nwhich has caused issues in the past:\n\n> Note that idle connections are tracked for a maximum of 10 minutes,\n> after which their traffic is subject to firewall rules, including the\n> implied deny ingress rule. If your instance initiates or accepts\n> long-lived connections with an external host, you should adjust TCP\n> keep-alive settings on your Compute Engine instances to less than 600\n> seconds to ensure that connections are refreshed before the timeout\n> occurs.\n\nWas the problem caused by this timeout? With the Docker Machine\nexecutor, we found that we could reproduce the problem with a simple\n`.gitlab-ci.yml`:\n\n```yaml\nimage: \"busybox:latest\"\n\ntest:\n  script:\n    - date\n    - sleep 601\n    - echo \"Hello world!\"\n    - date\n    - exit 1\n```\n\nThis would reproduce the failure, where we would never see the `Hello\nworld!` output. Changing the `sleep 601` to `sleep 599` would make the\nproblem go away. Hurrah! All we have to do is tweak the system TCP\nkeepalives, right? Google provided these sensible settings:\n\n```sh\nsudo /sbin/sysctl -w net.ipv4.tcp_keepalive_time=60 net.ipv4.tcp_keepalive_intvl=60 net.ipv4.tcp_keepalive_probes=5\n```\n\nHowever, enabling these kernel-level settings didn't solve the\nproblem. Were keepalives even being sent? Or was there some other issue?\nWe turned our attention to network traces.\n\n## Eavesdropping on Docker traffic\n\nIn order to understand what was happening, we needed to be able to\nmonitor the network communication between the runner and the Docker\ncontainer. But how exactly does the GitLab Runner stream data from a\nDocker container to the GitLab server?  
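\n\nAt the API level, \"attaching\" to a container's output looks roughly\nlike the sketch below. It uses the Python Docker SDK (the `docker`\npackage) for brevity rather than the Go client the runner actually\nuses, and the container name is hypothetical:\n\n```python\nimport docker\n\n# from_env() honors DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,\n# which is how docker-machine points a client at a remote, TLS-secured\n# Docker Engine on port 2376.\nclient = docker.from_env()\n\ncontainer = client.containers.get(\"my-build-container\")  # hypothetical name\n\n# Stream the container's output over the same long-lived connection;\n# each chunk arrives as soon as the process in the container writes it.\nfor chunk in container.attach(stdout=True, stderr=True, stream=True, logs=True):\n    print(chunk.decode(), end=\"\")\n```\n\n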
The following diagram\nillustrates the flow:\n\n```plantuml\nRunner -> Docker: POST /containers/name/attach\nDocker -> Runner: \u003Ccontainer output>\nDocker -> Runner: \u003Ccontainer output>\nRunner -> GitLab: PATCH /api/v4/job/:id/trace\nGitLab -> File: Save to disk\nGitLab -> Runner: 202 Accepted\n```\n\nFirst, the runner makes a [POST request to attach to the container\noutput](https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach).\nAs soon as a process running in the container outputs some data, Docker\nwill transmit the data over this HTTPS stream. The runner then copies\nthis data to GitLab via the PATCH request.\n\nHowever, as mentioned earlier, traffic between a GitLab Runner and the\nremote Docker machine is encrypted over HTTPS on port 2376. Was there an\neasy way to disable HTTPS? Searching through the code of Docker Machine,\nwe found that it did not appear to be supported out of the box.\n\nSince we couldn't disable HTTPS, we had two ways to eavesdrop:\n\n1. Use a man-in-the-middle proxy (e.g. [mitmproxy](https://mitmproxy.org/))\n1. Record the traffic and decrypt the traffic later using the private keys\n\n## Ok, let's be the man-in-the-middle!\n\nThe first seemed more straightforward, since [we already had experience\ndoing this with the Docker\nclient](https://docs.gitlab.com/ee/administration/packages/container_registry.html#running-the-docker-daemon-with-a-proxy).\n\nHowever, after [defining the proxy variables for GitLab\nRunner](https://docs.gitlab.com/runner/configuration/proxy.html#adding-proxy-variables-to-the-runner-config),\nwe found we were only able to intercept the GitLab API calls with\n`mitmproxy`. The Docker API calls still went directly to the remote\nhost. Something wasn't obeying the proxy configuration, but we didn't\ninvestigate further. We tried the second approach.\n\n## Decrypting TLS data\n\nTo decrypt TLS data, we would need to obtain the encryption keys. Where\nwere these located for a newly-created system with `docker-machine`? It\nturns out `docker-machine` worked in the following way:\n\n1. Call the Google Cloud API to create a new machine\n1. Create a `/root/.docker/machine/machines/:machine_name` directory\n1. Generate a new SSH keypair\n1. Install the SSH key on the server\n1. Generate a new TLS certificate and key\n1. Install and configure Docker on the newly-created machine with TLS certificates\n\nAs long as the machine runs, the directory will contain the information\nneeded to decode this traffic. We ran `tcpdump` and saved the private keys.\n\nOur first attempt at decoding the traffic failed. Wireshark could not\ndecode the encrypted traffic, although general TCP traffic could still\nbe seen. Researching more, we found out why: If the encrypted traffic\nused a [Diffie-Hellman key\nexchange](https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange),\nhaving the private keys would not suffice! 
This is by design, a property\ncalled [perfect forward\nsecrecy](https://en.m.wikipedia.org/wiki/Forward_secrecy).\n\nTo get around that limitation, we modified the GitLab Runner to disable\ncipher suites that used the Diffie-Hellman key exchange:\n\n```diff\ndiff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go\nindex 6b4c6a7c0..a3f86d756 100644\n",[268,702,535,832,728,832,771,1328,749],{"slug":5778,"featured":6,"template":678},"tracking-down-missing-tcp-keepalives","content:en-us:blog:tracking-down-missing-tcp-keepalives.yml","Tracking Down Missing Tcp Keepalives","en-us/blog/tracking-down-missing-tcp-keepalives.yml","en-us/blog/tracking-down-missing-tcp-keepalives",{"_path":5784,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5785,"content":5791,"config":5797,"_id":5799,"_type":16,"title":5800,"_source":17,"_file":5801,"_stem":5802,"_extension":20},"/en-us/blog/the-consul-outage-that-never-happened",{"title":5786,"description":5787,"ogTitle":5786,"ogDescription":5787,"noIndex":6,"ogImage":5788,"ogUrl":5789,"ogSiteName":692,"ogType":693,"canonicalUrls":5789,"schema":5790},"The Consul outage that never happened","Sometimes a good plan is the best tool for the job.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679092/Blog/Hero%20Images/consul-outage-image.jpg","https://about.gitlab.com/blog/the-consul-outage-that-never-happened","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The Consul outage that never happened\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Devin Sylva\"}],\n        \"datePublished\": \"2019-11-08\",\n      }",{"title":5786,"description":5787,"authors":5792,"heroImage":5788,"date":5794,"body":5795,"category":14,"tags":5796},[5793],"Devin Sylva","2019-11-08","\nWhen things go wrong on a large website, it can be fun to read the dramatic stories of high pressure incidents where nothing goes as planned. It makes for good reading. Every once in a while though, we get a success story. Every once in a while, things go exactly as planned.\n\n[GitLab.com](http://GitLab.com) is a large, high availability instance of GitLab. It is maintained by the [Infrastructure group](/company/team/?department=infrastructure-department), which currently consists of 20 to 24 engineers (depending on how you count), four managers, and a director, distributed all around the world. Distributed, in this case, does not mean across a few different offices. There are three or four major cities which have more than one engineer but with the exception of coworking days nobody is working from the same building.\n\nIn order to handle the load generated by about four million users working on around 12 million projects, GitLab.com breaks out the individual components of the GitLab product and currently spreads them out over 271 production servers.\n\nThe site is slowly migrating to using Hashicorp's [Consul](https://www.consul.io) for service location. Consul can be thought of like DNS, in that it associates a well-known name with the actual physical location of that service. It also provides other useful functions such as storing dynamic configuration for services, as well as locking for clusters. All of the Consul client and server components talk to each other over encrypted connections. 
These connections require a certificate at each end to validate the identity of the client and server and to provide the encryption key. The main component of GitLab.com which currently relies on this service is the database and its high availability system [Patroni](https://patroni.readthedocs.io/en/latest/). Like any website that provides functionality and not just information, the database is the central service that everything else depends on. Without the database, the website, API, CI pipelines, and git services will all deny requests and return errors.\n\n## Troubleshooting\n\nThe [issue](https://gitlab.com/gitlab-com/gl-infra/production/issues/1037) came to our attention when a database engineer noticed that one of our database servers in the staging environment could not reconnect to the staging Consul server after the database node was restarted.\n\nIt turns out that the TLS certificate was expired. This is normally a simple fix. Someone would go to the Certificate Authority (CA) and request a renewal – or if that fails, generate a new certificate to be signed by the same CA. That certificate would replace the expired copy and the service would be restarted. All of the connections should reestablish using the new certificate and just like with any other rolling configuration change, it should be transparent to all users.\n\nAfter looking everywhere, and asking everyone on the team, we got the definitive answer that the CA key we created a year ago for this self-signed certificate had been lost.\n\nThese test certificates were generated for the original proof-of-concept installation for this service and were never intended to be transitioned into production. However, since everything was working perfectly, the expired test certificate had not been calling attention to itself. A few things should have been done, including: Rebuilding the service with production in mind; conducting a production readiness review; and monitoring. But a year ago, our production team was in a very different place. We were small with just four engineers, and three new team members: A manager, director, and engineer, all of whom were still onboarding. We were less focused on the gaps that led to this oversight a year ago and more focused on fixing the urgent problem today.\n\n### Validating the problem\n\nFirst, we needed to validate the problem using the information we'd gathered. Since we couldn't update the existing certificates, we turned validation off on the client that couldn't connect. Turning validation off didn't change anything since the encrypted connections validate both the cluster side and client side. Next, we changed the setting on one server node in the cluster and so the restarted client could then connect to the server node. The problem now was that the server could no longer connect to any other cluster node and could not rejoin the cluster. The server we changed was not validating connections, meaning it was ignoring the expired certificate of its peers in the cluster but the peers were not returning the favor. They were shunning it, putting the whole cluster in a degraded state.\n\nWe realized that no matter what we did, some servers and some clients would not be able to connect to each other until after the change had been made everywhere and after every service was restarted. Unfortunately, we were talking about 255 of our 271 servers. 
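\n\nAs an aside, this kind of expiry is cheap to detect before it becomes an emergency. Here is a minimal sketch of such a check – not part of our tooling; the hostnames and port are placeholders, and it uses the third-party `cryptography` package:\n\n```python\nimport datetime\nimport socket\nimport ssl\n\nfrom cryptography import x509\n\ndef days_until_expiry(host: str, port: int) -> int:\n    # Fetch the peer certificate without validating it (it may already\n    # be expired), then read its notAfter timestamp.\n    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n    context.check_hostname = False\n    context.verify_mode = ssl.CERT_NONE\n    with socket.create_connection((host, port), timeout=5) as sock:\n        with context.wrap_socket(sock, server_hostname=host) as tls:\n            der_cert = tls.getpeercert(binary_form=True)\n    cert = x509.load_der_x509_certificate(der_cert)\n    return (cert.not_valid_after - datetime.datetime.utcnow()).days\n\nfor host in [\"consul-01.example.com\", \"consul-02.example.com\"]:  # placeholders\n    print(host, days_until_expiry(host, 8300), \"days left\")\n```\n\nWired into monitoring with an alert threshold, a check like this would have flagged the certificates weeks before they expired.\n\n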
Our tool set is designed for gradual rollouts, not simultaneous actions.\n\nAt first we were unsure why the site was even still online: if the clients and servers could not connect, nothing should have been working. We ran a small test, confirming the site was only working because the connections had already been established when the certificates expired. Any interruption of these long-running connections would force them to reconnect and revalidate, and with the expired certificates every new connection across the fleet would be rejected.\n\n> Effectively, we were in the middle of an outage that had already started, but hadn't yet gotten to the point of taking down the site.\n\n### Testing in staging\n\nWe declared an incident and began testing every angle we could think of in the staging environment, including:\n\n* Reloading the configuration of the running service, which worked fine and did not drop connections, but the [certificate settings](https://github.com/hashicorp/consul/pull/4204) are [not included in the reloadable settings](https://www.consul.io/docs/agent/options.html#reloadable-configuration) for our version of Consul.\n* Simultaneous restarts of various services, which worked, but our tools wouldn't allow us to do that with ALL of the nodes at once.\n\nEverything we tried indicated that we had to break those existing connections in order to activate any change, and that we could only avoid downtime if that happened on **ALL nodes at precisely the same time**.\n\nEvery problem uncovered other problems, and as we were troubleshooting, one of our production Consul servers became unresponsive, disconnected all SSH sessions, and would not allow anyone to reconnect. The server did not log any errors. It was still sending monitoring data and was still participating in the Consul cluster. If we restarted the server, then it would not have been able to reconnect to its peers and we would have an even number of nodes. Not having quorum in the cluster would have been dangerous when we went to restart all of the nodes, so we left it in that state for the moment.\n\n## Planning\n\nOnce the troubleshooting was finished, [it was time to start planning](https://gitlab.com/gitlab-com/gl-infra/production/issues/1042).\n\nThere were a few ways to solve the problem. We could:\n\n* Replace the CA and the certificates with new self-signed ones.\n* Change the CA setting to point to the system store, allowing us to use certificates signed by our standard certificate provider and then replace the certificates.\n* Disable the validation of the dates so that the expired certificate would not cause connections to fail.\n\nAll of these options carried the same risks and involved the same risky restart of all services at once.\n\nWe picked the last option. Our reasoning was that disabling the validation would eliminate the immediate risk and give us time to slowly roll out a properly robust solution in the near future, without having to worry about disrupting the whole system. It was also the [smallest and most incremental change](https://handbook.gitlab.com/handbook/values/#iteration).\n\n### Working asynchronously to tackle the problem\n\nWhile there was some time pressure due to the [risk of network connections being interrupted](https://gitlab.com/gitlab-com/gl-infra/production/issues/1037#note_201745119), we had to consider the reality of working across timezones as we planned our solution.\n\n> We decided not to hand it off to the European shift, who were coming online soon. 
Being a [globally distributed](/company/culture/all-remote/) team, we had already handed things off from the end of the day in Mongolia, through Eastern and Western Europe and across the Americas, and were approaching the end of the day in Hawaii and New Zealand.\n\nAustralia still had a few more hours and Mongolia had started the day again, but the folks who had been troubleshooting it throughout the day had a pretty good handle on what needed to happen and what could go wrong. It made sense for them to be the ones to do the work. We decided to make a \"Break Glass\" plan instead: a merge request with all of the changes and information necessary for the European shift to get us back into a good state in case a full outage happened before anyone who had been working on it woke up. Everyone slept better knowing that we had a plan that would work, even though it could not be executed without causing downtime; if we were already experiencing downtime, that would no longer matter.\n\n### Designing our approach\n\nIn the morning (HST), everything was as we had left it, so we started planning how to change the settings and restart all of the services without downtime. Our normal management tools were out because of the time it takes them to roll out changes. Even sequential tools such as `knife ssh`, `mussh`, or `ansible` wouldn't work because the change had to be **precisely simultaneous**. Someone joked about setting it up in `cron`, which led us to the standard Linux `at` command (a relative of the `batch` command). `cron` would require cleanup afterward, but an `at` job can be pushed out ahead of time with a sequential tool and will run a command at a precise time on all machines. Back in the days of hands-on, bare metal system administration, it was a useful trick for running one-time maintenance in the middle of the night or making it look like you were working when you weren't. Now `at` has become more obscure with the trend toward managing fleets of servers rather than big monolithic central machines. We chose to run the command `sudo systemctl restart consul.service`. We tested this in staging to verify that our Ubuntu distribution made environment variables like `$PATH` available, and that `sudo` did not ask for a password. On some distributions (older CentOS especially) this is not always the case.
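\n\nThe mechanics are simple enough to sketch. We pushed the job out with `mussh` in practice, but a plain SSH loop over a hypothetical `consul-nodes.txt` host list shows the idea:\n\n```\n# Queue the same one-shot job on every host ahead of time. at(1) has\n# one-minute granularity, which, with synchronized clocks, is precise\n# enough: every atd fires the restart at the same wall-clock minute.\nfor host in $(cat consul-nodes.txt); do\n  ssh \"$host\" \"echo 'sudo systemctl restart consul.service' | at 02:20\"\ndone\n\n# Each host can list what it has queued with: atq\n```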
\n\nWith those successful tests, we still needed to change the config files. Luckily, there is nothing that prevents changing these ahead of time, since the changes aren't picked up until the service restarts. We didn't want to do this step at the same time as the service restart, so that we could validate the changes and keep the `at` command as small as possible. We decided not to use Chef to push out the change because we needed complete and immediate transparency: Any nodes that did not get the change would fail after the restart. `mussh` was the tool that offered the most control and visibility while still being able to change all hosts with one command.\n\nWe also had to disable the Chef client so that it didn't overwrite the changes between when they were written and when the service restarted.\n\nBefore running anything, we also needed to address the one Consul server that we couldn't access. It likely just needed to be rebooted, and would come up unable to reconnect to the cluster. The best option was to do this manually just before starting the rest of the procedure.\n\nOnce we had mapped out the plan, we practiced it in the disaster recovery environment rather than in staging, because all of the nodes in the staging environment had already been restarted, so there were no long-running connections to test. That made the disaster recovery environment the next best option. The practice run did not go perfectly, since the database in this environment was already in an unhealthy state, but it gave us valuable information to adjust the plan.\n\n## Pre-execution\n\n### A moment of panic\n\nIt was almost time to fix the inaccessible Consul node. The team connected to one of the other nodes to monitor and watch logs. Suddenly, the second node started disconnecting people. It was behaving exactly like the inaccessible node had the previous day. 😱 Suspiciously, it didn't disconnect everyone. Those who were still logged in noticed that `sshguard` was blocking access to some of the bastion servers that all of our ssh traffic flows through when accessing the internal nodes: [Infrastructure#7484](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/7484). We have three bastion servers, and two were blocked because so many of us connected so many sessions so quickly. Disabling `sshguard` allowed everyone back in, and that information was the hint we needed to manually find the one bastion which hadn't yet been blocked. It got us back into the original problem server. Disabling `sshguard` there left us with a fully functional node, with the ability to accept the `at` command to restart the Consul service at exactly the same time as the others.\n\nWe verified that we had an accurate and instantaneous way to monitor the state of the services. Watching the output of the `consul operator raft list-peers` command every second gave us a view that looked like this:\n\n```\nNode                Address          State     Voter  RaftProtocol\nconsul-01-inf-gprd  10.218.1.4:8300  follower  true   3\nconsul-03-inf-gprd  10.218.1.2:8300  leader    true   3\nconsul-05-inf-gprd  10.218.1.6:8300  follower  true   3\nconsul-04-inf-gprd  10.218.1.5:8300  follower  true   3\nconsul-02-inf-gprd  10.218.1.3:8300  follower  true   3\n```\n\n### More nodes, more problems\n\nEven the most thorough plans always miss something. At this point we realized that one of the three `pgbouncer` nodes, which direct traffic to the correct database instance, was not showing as healthy in the load balancer. One is normally in this state as a warm spare, but one of the side effects of disconnecting the `pgbouncer` nodes from Consul is that they would all fail their load balancer health checks. If all health checks are failing, GCP load balancers send requests to ALL nodes as a safety feature. This would lead to too many connections to our database servers, causing unintended consequences. We worked around this by removing the unhealthy node from the load balancer pool for the remainder of this activity.\n\nWe then ran through a list of pre-flight checks:\n\n* We checked that the lag on the database replicas was zero, and that they weren't trying to replicate any large and time-consuming transactions.\n* We generated a text list of all of the nodes that run the Consul client or server.\n* We verified the time zone (UTC) and time synchronization on all of those servers to ensure that when the `at` command executed the restart, an unsynchronized clock wouldn't cause unintended behavior.\n* We also verified the `at` scheduler was running on all of those nodes, and that `sudo` would not ask for a password.\n* We verified the script that would edit the config files, and tested it against the staging environment.\n* We also made sure `sshguard` was disabled and wasn't going to lock out the scripted process for behaving like a scripted process.
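\n\nA few of those checks are easy to show. A per-host spot check could look something like this (a sketch; in practice we pushed these over SSH like everything else):\n\n```\n# Confirm the pieces the simultaneous restart depends on:\ntimedatectl | grep -E 'Time zone|synchronized'  # expect UTC and a synced clock\nsystemctl is-active atd                         # the at(1) job scheduler\nsudo -n true && echo 'passwordless sudo OK'     # sudo must not prompt\n```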
\n\nThis might seem like a lot of steps, but without any of these prerequisites the whole process would fail. Once all of that was done, everything was ready to go.\n\n## Execution\n\nIn the end, we scheduled a maintenance window and distilled all of the research and troubleshooting down to the [steps in this issue](https://gitlab.com/gitlab-com/gl-infra/production/issues/1042).\n\nEverything was staged and it was time to make the changes. This course of action included four key steps. First, we paused the Patroni database high availability subsystem. Pausing would freeze database failover and keep the high availability configuration static until we were done. It would have been bad to have a database failure during this time, so minimizing the amount of time in this state was important.\n\nNext, we ran a script on every machine that stopped the Chef client service and then changed the verify lines in the config files from true to false. It wouldn't have helped to have Chef trying to reconfigure anything as we made changes. We did this using `mussh` in batches of 20 servers at a time; any more in parallel and our SSH agent and Yubikeys might not have been able to keep up.
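\n\nPer host, the edit itself was tiny; it amounted to something like this (a sketch; the file path and key names are illustrative of Consul's TLS verification settings, not our exact layout):\n\n```\n# Stop and disable Chef first so it can't revert the edit before the\n# restart, then flip Consul's TLS verification flags in place.\nsudo systemctl disable --now chef-client.service\nsudo sed -i 's/\"verify_incoming\": true/\"verify_incoming\": false/' /etc/consul/config.json\nsudo sed -i 's/\"verify_outgoing\": true/\"verify_outgoing\": false/' /etc/consul/config.json\n```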
\n\nWe were not expecting the state of anything to change in this step: The config files on disk should have the new values, but the running services wouldn't change, and more importantly, no TCP connections would disconnect. That was exactly what we got, so it was time for some verification.\n\nOur third step was to check all of the servers and a random sampling of client nodes to make sure the config files had been modified appropriately. It was also a good time to double-check that the Chef client was disabled. This check turned out to be a good thing to do, because there were a few nodes that still had the Chef client active. It turned out that those nodes were in the middle of a run when we disabled the service, and Chef reenabled the service for us when the run completed. Chef can be _so_ helpful. We disabled it manually on the few machines that were affected. This delayed our maintenance window by a few minutes, so we were very glad we didn't schedule the `at` commands first.\n\nFinally, we needed to remove the inactive `pgbouncer` node from the load balancer, so that when the load balancer went into its safety mode, it would only send traffic to the two nodes that were in a known state. You might think that removing it from the load balancer would be enough, but since it also participates in a cluster via Consul, the whole service needed to be shut down along with the health check that the load balancer uses to determine whether to send it traffic. We made a note of the full command line from the process table, shut it down, and removed it from the pool.\n\n### The anxiety builds\n\nNow was the moment of truth. It was 02:10 UTC. We pushed the following command to every server (20 at a time, using `mussh`): `echo 'sudo systemctl restart consul.service' | at 02:20` – it took about four minutes to complete. Then we waited. We monitored the Consul servers by running `watch -n 1 consul operator raft list-peers` on each of them in a separate terminal. We bit our nails. We watched the dashboards for signs of db connection errors from the frontend nodes. We all held our breath, and watched the database for signs of distress. Six minutes is a long time to think: \"It's 4am in Europe, so they won't notice\" and \"It's dinner time on the US west coast, maybe they won't notice\". Trust me, six minutes is a _really_ long time: \"Sorry APAC users for your day, which we are about to ruin by missing something\".\n\nWe counted down the last few seconds and watched. In the first second, the Consul servers all shut down, severing the connections that were keeping everything working. All 255 of the clients restarted at the same time. In the next second, we watched the servers return `Unexpected response code: 500`, which means \"connection refused\" in this case. The third second... still returning \"panic now\" or maybe it was \"connection refused\"... In the fourth second, all nodes returned `no leader found`, which meant that the connection was no longer being refused but the cluster was not healthy. The fifth second, no change. I'm thinking, just breathe, they were probably all discovering each other. In the sixth second, still no change: Maybe they're electing a leader? Second seven was the appropriate time for worry and panic. Then, the eighth second brought good news: `node 04 is the leader`. All other nodes healthy and communicating properly. In the ninth second, we let out a collective (and globally distributed) exhale.\n\n### A quick assessment\n\nNow it was time to check what damage those painfully long eight seconds had done. We went through our checklist:\n\n* The database was still processing requests, no change.\n* The web and API nodes hadn't thrown any errors. They must have restarted fast enough that the cached database addresses were still being used.\n* The most important metric – the graph of 500 errors seen by customers: There was no change.\n\nWe expected to see a small spike in errors, or at least some identifiable change, but there was nothing but the noise floor. This was excellent news! 🎉\n\nThen we checked whether the database was communicating with the Consul servers. It was not. Everyone quickly turned their attention to the backend database servers. If they had been running normally and the high availability tool hadn't been paused, an unplanned failover would have been the minimum outage we could have hoped for; it's likely that they would have gotten into a very bad state. We started to troubleshoot why the database wasn't communicating with the Consul server, but about one minute into the change, the connection came up and everything synced. Apparently it just needed a little more time than the others. We verified everything, and when everyone was satisfied, we turned the high availability back on.\n\n## Cleanup\n\nNow that everything in the critical path was working as expected, we released the tension from our shoulders.
We re-enabled Chef and merged the MR pinning the Chef recipes to the newer version, and the MR's CI job pushed the newer version to our Chef server. We picked a few low-impact servers, checked the `md5sum` of the Consul client config files, and manually kicked off Chef runs. After Chef finished, there was no change to the files, and the Chef client service was running normally again. We followed the same process on the Consul servers with the same result, and repeated it manually on the database servers, just for good measure. Once those all looked good, we used `mussh` to kick off a Chef run on all of the servers, using the same technique we used to turn them off.\n\nNow all that was left was to straighten everything out with `pgbouncer` and the database load balancer, and then we could fully relax. Looking at the health checks, we noticed that the two previously healthy nodes were no longer reporting as healthy. The health checks are used to tell the load balancer which `pgbouncer` nodes have a Consul lock and therefore which nodes to send the traffic to. A little digging showed that, after retrying the connection to the Consul service a few times, the health checks had given up. This was not ideal, so we [opened an Infrastructure issue](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/7612) to fix it later and restarted the health checks manually. Everything showed normal, so we added the inactive node back to the load balancer. The inactive node's health check told the load balancer not to select it, and since the load balancer was no longer in failsafe mode (due to the other nodes' health checks succeeding), it refrained from sending that node traffic.\n\n## Conclusion\n\nSimultaneously restarting all of the Consul components with the new configuration put everything back into its original state, other than the validation setting, which we set to false, and the TCP sessions, which we restarted. After this change, the Consul clients will still be using TLS encryption but will ignore the fact that our cert is now expired. This is still not an ideal state, but it gives us time to get to one in a thoughtful way rather than as a rushed workaround.\n\nEvery once in a while we get into a situation that all of the fancy management tools just can't fix. There is no run book for situations such as the one we encountered. The question we were asked most frequently once people got up to speed was: \"Isn't there some instructional walkthrough published somewhere for this type of thing?\". For replacing a certificate from the same authority, yes, definitely. For replacing a certificate on machines that can have downtime, there are plenty. But for keeping traffic flowing when hundreds of nodes need to change a setting and reconnect within a few seconds of each other... that's just not something that comes up very often. Even if someone had written up the procedure, it wouldn't have worked in our environment, with all of the peripheral moving parts that required our attention.\n\nIn these types of situations there is no shortcut around thinking things through methodically. In this case, there were no tools or technologies that could solve the problem. Even in this new world of infrastructure as code, site reliability engineering, and cloud automation, there is still room for old-fashioned system administrator tricks. There is just no substitute for understanding how everything works.
We can try to abstract it away to make our day-to-day responsibilities easier, but when it comes down to it there will always be times when the best tool for the job is a solid plan.\n\nCover image by [Thomas Jensen](https://unsplash.com/@thomasjsn?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1286],{"slug":5798,"featured":6,"template":678},"the-consul-outage-that-never-happened","content:en-us:blog:the-consul-outage-that-never-happened.yml","The Consul Outage That Never Happened","en-us/blog/the-consul-outage-that-never-happened.yml","en-us/blog/the-consul-outage-that-never-happened",{"_path":5804,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5805,"content":5811,"config":5815,"_id":5817,"_type":16,"title":5818,"_source":17,"_file":5819,"_stem":5820,"_extension":20},"/en-us/blog/how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster",{"title":5806,"description":5807,"ogTitle":5806,"ogDescription":5807,"noIndex":6,"ogImage":5808,"ogUrl":5809,"ogSiteName":692,"ogType":693,"canonicalUrls":5809,"schema":5810},"How GitLab Pages made our Sketch design handoffs easier and faster","From designer to developer hands, here is our workflow for sharing Sketch design specs using GitLab Pages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684206/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab Pages made our Sketch design handoffs easier and faster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pedro Moreira da Silva\"}],\n        \"datePublished\": \"2019-11-07\",\n      }",{"title":5806,"description":5807,"authors":5812,"heroImage":5808,"date":5813,"body":5814,"category":14},[5678],"2019-11-07","\n\nOne of the designer’s responsibilities is handing off the design to developers\nso that it gets implemented as intended. We have _design specs_ to make this process smoother, but these have their own set of challenges. How can we speed\nup and ensure an effortless transition from designer to developer, so that\npeople can focus on what they do best? Here’s how we’re doing it with a\ncombination of [Sketch Measure][sketch-measure] and [GitLab Pages][gl-pages].\n\n\u003C!-- more -->\n\nAs development progresses, it’s natural for developers to have questions, and the\ndesigner should be there to answer them. A lot of these questions can be\nanswered by the design specs, which mainly include detailed instructions about\nvisual aspects but can also cover functional ones. This guidance is a\ncrucial part of handing off a design for implementation. Without it, the\ndeveloper has to deal with a lot of guessing and manual work, which is not the\nideal workflow.\n\n## Our workflow at GitLab\n\n\n\nFor more than three years we have been using a specific workflow, made of open\nsource tools, to power our Sketch design handoffs. This workflow is how we\nhand off designs to build not only our product but also the [Pajamas Design\nSystem](http://design.gitlab.com). In a nutshell, here's how that workflow\nusually happens:\n1. Use the [Sketch Measure][sketch-measure] plugin to generate specs from the\n   designs we have created in [Sketch](https://www.sketchapp.com) (our user\n   interface design software of choice).
\n1. Commit design spec files to our [GitLab Design][gl-design] project on\n   GitLab.com, using Git.\n1. Once the files arrive on GitLab.com, GitLab's [continuous integration\n   (CI)][gl-ci] engine starts and triggers the [GitLab Pages][gl-pages]\n   feature.\n1. GitLab Pages then publishes an online [index page](https://gitlab-org.gitlab.io/gitlab-design) with all of the design specs.\n1. Designers can now easily share a URL to the design specs that is always\n   up to date and that anyone can access.\n\nUsing this design handoff workflow, we’ve been able to improve efficiency and\ncommunication between designers and developers. Having these tools in place and\nconsistently following this workflow is one of our first steps towards\npracticing\n[DesignOps](https://medium.com/vmwaredesign/understanding-the-value-of-designops-e2477330a923):\n“principles and processes to assist designers in becoming more productive and\ncollaborative.” While arguably most about tooling and automation, this workflow also\nblends three other important pillars of high-performance design teams:\nprocess, standardization, and collaboration.\n\n## Creating design specs\n\nThe first step in our design handoff workflow is generating our design specs\nwith [Sketch Measure][sketch-measure]. Sketch Measure is a free, open source\nplugin for Sketch that automatically generates specs from your Sketch designs.\nWith one click, it outputs an HTML page that allows anyone to inspect all the\ndesign details, including CSS styles. This HTML page can be viewed online or\noffline, in all of the major browsers – [check out a\ndemo](https://utom.design/news/).\n\nYou can even add implementation notes to your design, so developers don’t have\nto look in different places for the information they need.\n\n![Example of an HTML page with the specs generated by Sketch Measure](https://about.gitlab.com/images/blogimages/how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster/sketch-measure-example.png){: .shadow.center}\nExample of an HTML page with the specs generated by Sketch Measure.\n{: .note.text-center}\n\nJust by using this plugin, we’re already saving money and time:\n\n- We save money on Sketch licenses because only designers need Sketch. For us\n  at GitLab, that means saving $3540 per year (we currently have around [60\n  people on the frontend team](/company/team/)).\n- We save time because developers don’t have to download Sketch, install\n  Sketch, learn Sketch, download files, open files, etc.\n- We save more time because designers and developers are now focused on doing\n  what they do best, instead of handling miscommunications or manually\n  marking/writing all of the specs.\n- And our developers can use their favorite operating system; there’s no need to\n  use macOS or have a macOS virtual machine just to open Sketch files.\n\nOther tools also generate design specs from Sketch files, but we settled on\nSketch Measure because it’s free, open source, and allows us to save and\nversion control the design specs in GitLab. If you’re interested in\nalternatives to Sketch Measure, see\n[Marketch](https://github.com/tudou527/marketch) (a plugin similar to Sketch\nMeasure), [Abstract](https://www.abstract.com/),\n[Avocode](https://avocode.com/), [InVision\nInspect](https://www.invisionapp.com/feature/inspect),\n[Markly](https://marklyapp.com/), [Marvel](https://marvelapp.com/sketch/), or\n[Zeplin](https://zeplin.io/).
In June 2019, [Sketch\nannounced](https://blog.sketchapp.com/sketch-for-teams-smart-layout-and-more-announcements-from-layers-eed45e3fa0fd)\nthat they’re “bringing an Inspector to Cloud, so you can grab code, view specs\nand download production ready assets, without having to open the Mac app,” so a\nSketch-native solution may be coming soon.\n\n## Sharing design specs with GitLab Pages\n\nNow that we have the design specs, the second step is getting the specs into\nthe developer’s hands. Wouldn’t it be great if you could just access a URL\nthat’s always up-to-date?\n\nEnter [GitLab Pages][gl-pages]. It’s a feature that creates static websites for\nyour GitLab projects, groups, or user account. You can use it for free on\nGitLab.com or [your own GitLab\ninstance](https://docs.gitlab.com/ee/administration/pages/). It supports any\nstatic website generator, but in our case, we just need it to host our simple\nHTML file with the design specs.\n\nIn our UX department, we use the Git repository of the [GitLab\nDesign][gl-design] project to store and host not only the design specs but also\neditable Sketch files. Every time someone adds or changes design specs in the\nGitLab design repository, the GitLab Pages feature updates the project’s\nwebsite automatically.\n\nThese automatic updates are possible thanks to a simple YML file in the repository\nthat gives instructions to our [continuous integration (CI)][gl-ci] feature,\nthe engine that powers GitLab Pages. In essence, that file tells CI to run\nGitLab Pages each time something is pushed into our default branch. If you’re\ninterested in how it works, we’ve added comments to the file that try to\nexplain it as simply as possible:\n[`.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab-design/blob/master/.gitlab-ci.yml)
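\n\nTo give a feel for the mechanics, the minimal generic form of such a file (not our\nactual configuration; see the link above for that) looks like this:\n\n```yaml\n# A job named \"pages\" that produces a \"public/\" artifact is all\n# GitLab Pages needs to deploy a static site.\npages:\n  script:\n    - mkdir .public\n    - cp -r * .public\n    - mv .public public\n  artifacts:\n    paths:\n      - public\n  only:\n    - master\n```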
\n\nThe final result is an [index page](https://gitlab-org.gitlab.io/gitlab-design)\nwith links to the different design specs that are generated, as seen in the\nimage below (feel free to explore). Since making this work for design specs,\nwe’ve also added support for [Framer prototypes](https://framer.com/) and plain\nHTML pages.\n\n![Screenshot of the GitLab Design index page](https://about.gitlab.com/images/blogimages/how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster/gitlab-design-index.png){: .shadow.center.medium}\n\nWith GitLab Pages and GitLab CI set up, you just push your specs to GitLab and\nthe rest is magic. The specs are available to anyone through a URL and are easy\nto share anywhere.\n\nFor us, this means more savings!\n- We save time on manually uploading design specs as these are automatically\n  taken care of by GitLab Pages each time our designers commit to the\n  repository.\n- We save time if something goes wrong with the design files or design specs\n  because we can go back in time and revert to a better past version.\n- And naturally, we save even more time as we all use GitLab for the whole\n  software development lifecycle. Instead of a bunch of ad-hoc tools from\n  different vendors, GitLab provides everything you need in an integrated\n  product.\n\nHere are other cool things you can do with GitLab to make design handoffs even\nbetter:\n\n### Mention issues in commit messages for transparency\n\nBy mentioning the GitLab issue in the commit message, a note is created in the\nissue to let everyone know that the specs are available or have been updated\n([live example](https://gitlab.com/gitlab-org/gitlab/issues/13490#note_220761012)).\nThis also makes it easy for people looking at the issue to contribute and fork\nthe design by following the link to the commit (especially important if someone\non the team is out of office). This [crosslinking\nfeature](https://docs.gitlab.com/ee/user/project/issues/crosslinking_issues.html#from-commit-messages)\nnot only supports issues but also epics and merge requests.\n\n![Example of an issue note linking to the referencing commit](https://about.gitlab.com/images/blogimages/how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster/commit-system-note.png){: .shadow.center}\n\nTo automatically mention issues in commit messages, we’ve created a\n[`prepare-commit-msg`](https://gitlab.com/gitlab-org/gitlab-design/blob/master/hooks/prepare-commit-msg)\nGit hook. Once the Git hook is installed in your local repository, every time\nyou commit, the hook will add the issue, epic, and merge request IDs found\nin the names of the staged files (and their folders) to the commit message body. For\nexample, if a file or folder you’re committing contains `gitlab#1337-…` in its\nname, the commit message body will be appended with `gitlab#1337`, which\nreferences the project handle for\n[GitLab](https://gitlab.com/gitlab-org/gitlab) and its issue #1337.
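\n\nA stripped-down sketch of such a hook (the real one, linked above, does quite a\nbit more) might look like this:\n\n```sh\n#!/bin/sh\n# prepare-commit-msg: Git passes the path of the commit message file\n# as $1. Collect references like \"gitlab#1337\" from the names of the\n# staged files and append the unique ones to the message body.\nrefs=$(git diff --cached --name-only | grep -oE '[a-z-]+#[0-9]+' | sort -u)\nif [ -n \"$refs\" ]; then\n  { echo; echo \"$refs\"; } >> \"$1\"\nfi\n```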
\n\n### Look at previous versions of specs\n\nIf you want, [GitLab’s CI][gl-ci] can save all versions of the design specs,\nusing [job\nartifacts](https://docs.gitlab.com/ee/ci/pipelines/job_artifacts.html).\nThis means you can go back in time and see what a previous version looked like\ninside of GitLab, without having to clone, revert, or download anything.\nEverything can be saved and viewed online. For example, here’s a [previous\nversion of our design specs index\npage](https://gitlab-org.gitlab.io/-/gitlab-design/-/jobs/299229231/artifacts/public/index.html),\nfrom September 2019, when Kyle updated some Sketch files and their specs ([CI\npipeline](https://gitlab.com/gitlab-org/gitlab-design/pipelines/83198578)).\n\nAlso, because Git is used for version control, you can browse the whole history\nof changes made. No change is ever lost.\n\n## Future improvements\n\nWe hope this automated way of doing design handoffs pushes our UX department to continue exploring the role of DesignOps at GitLab,\nnot only inside the company but also in the product itself.\n\nSpecifically for design specs, here are some things we are looking into:\n- [Make it easier to share links to design specs in\n  GitLab](https://gitlab.com/gitlab-org/gitlab/issues/32704).\n- [Improve the index page with sorting, filters, easy to copy URLs, and links\n  to issues/epics/merge requests](https://gitlab.com/gitlab-org/gitlab-design/issues/638).\n\nBecause GitLab’s CI is so awesome and flexible, we are even thinking about\nusing it to automatically update our SVG icons every time we update them in our\nSketch UI Kit\n([gitlab-svgs#1](https://gitlab.com/gitlab-org/gitlab-svgs/issues/1)).\n\nAs we mentioned earlier, we don’t use this design handoff workflow exclusively\nfor design specs. It’s the way our designers share _any kind of\ndeliverable_ with our developers. The deliverables can also be Framer\nprototypes or plain HTML pages. We hope that by sharing our design handoff workflow\nwe can help you and your team also work better and faster!\n\nThanks to [Dimitrie Hoekstra](/company/team/#dimitrieh) for the initial setup\nthat allowed this workflow and [Alessio Caiazza](/company/team/#nolith), [Marin\nJankovski](/company/team/#marin), [Rémy Coutable](/company/team/#rymai), and\n[Juan J. Ramirez](/company/team/#jj-ramirez) for the continued technical\nsupport and keeping this [repository’s\nsuperpowers](https://gitlab.com/gitlab-org/gitlab-design/blob/master/CONTRIBUTING.md#superpowers-)\nawesome.\n\nIf you have any questions or suggestions, feel free to [post a comment\non the community forum](https://forum.gitlab.com/new-topic?tags=blog-feedback), [tweet at us](https://twitter.com/gitlab), or create an\nissue in the [GitLab Design][gl-design] project.\n\n",{"slug":5816,"featured":6,"template":678},"how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster","content:en-us:blog:how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster.yml","How Gitlab Pages Made Our Sketch Design Handoffs Easier And Faster","en-us/blog/how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster.yml","en-us/blog/how-gitlab-pages-made-our-sketch-design-handoffs-easier-and-faster",{"_path":5822,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5823,"content":5829,"config":5835,"_id":5837,"_type":16,"title":5838,"_source":17,"_file":5839,"_stem":5840,"_extension":20},"/en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"title":5824,"description":5825,"ogTitle":5824,"ogDescription":5825,"noIndex":6,"ogImage":5826,"ogUrl":5827,"ogSiteName":692,"ogType":693,"canonicalUrls":5827,"schema":5828},"How adSoul transitioned to GitLab CI from Jenkins","adSoul, a marketing automation company, outlines a successful three-phase migration plan for moving to GitLab CI from Jenkins.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678442/Blog/Hero%20Images/londoncommit.png","https://about.gitlab.com/blog/adsoul-devops-transition-to-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How adSoul transitioned to GitLab CI from Jenkins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-11-05\",\n      
}",{"title":5824,"description":5825,"authors":5830,"heroImage":5826,"date":5832,"body":5833,"category":14,"tags":5834},[5831],"Brein Matturro","2019-11-05","\nadSoul is a Germany-based data-driven online marketing company that aims to improve search engine advertising and scalability for businesses. The core of adSoul relies heavily on API interfaces and entity recognition to post keywords on Google and Bing with marketing automation. \n\nAt GitLab Commit London, [Philipp Westphalen](https://www.linkedin.com/in/philipp-westphalen-a83318188/), fullstack developer at adSoul and GitLab Hero, shares how the company transitioned from Jenkins to GiLab CI. adSoul is a startup company with five developers, and as Philipp says “We literally have no time for everything we need to do.” They were looking for a tool that requires less time-consuming maintanence, and with Jenkins the team found it hard to read their existing files. “Our Jenkins was not so stable at all and it was tough to change because it was managed by our provider,” Philipp says. Cost and visibility were also huge motivators in moving away from [Jenkins to Gitlab CI](/blog/docker-my-precious/).\n\n## GitLab migration in three phases\n\nPhase 1: Move the repository.\nThe [adSoul team](https://www.adsoul.com) used the GitHub Import by GitLab, but had setbacks with migrating their issues, so they created a GitHub open source issue migrator as a resolution. Following that, they modified scripts with the new origin by exchanging the GitHub API call with a GitLab API. “This was really easy and we had a stable build with our new repository, so we could move our product management to GitLab and not need GitHub anymore,” Philipp says.\n\nPhase 2: Migrate the CI/CD pipeline.\nThe team started to create a GitLab CI YAML and tried to do a simple ‘lift and shift,’ however their processes were more complicated than anticipated. Though this phase was time consuming, it became clear the team could move to phase three without hiccups. “Quick pro tip,” says Philipp. “If you’re running your own GitLab runners, increase the log limit if you have to debug your building step.” \n\nPhase 3: Improve the CI/CD pipeline.\nThe team thought about ways of building their software, so they split projects into steps. “Our idea was that one job does one thing perfectly. Each job is simple and everyone can modify it easily” Philipp says. They improved their build time by moving to Gradle, created parallel job processing, and by using standard Docker images for ease of management. \n\n## Takeaways from a successful migration\n\n1. Plan your migration. Get every member of the team involved and aware of the upcoming changes, including how tools are working together and what the expectations are moving forward. “Take your time for the migration,” Philipp says. “It’s not two days and then we are finished.” \n\n2. Go step by step. adSoul used a three phase plan which allowed the team to deploy a new version and still continue to work on existing projects. “We could improve our application without having to wait for a better infrastructure,” Philipp says.\n\n3. Rethink your [DevOps strategy](/blog/better-devops-with-gitlab-ci-cd/). In the time leading up to the migration, examine things like security automation and other important pieces in a DevOps overall strategy.\n\n4. Start with a small project. 
Work closely with colleagues to create small GitLab CI projects to familiarize everyone before taking on larger, more complex projects.\n\nPro tip: Keep your pipeline user-friendly. Create a good user experience for the team with clear job names, style your config for a better overview, and write comments for variables and hard-to-understand code. \n\n## Why GitLab works for a small team\n\n“The most important thing is that GitLab is a powerful CI/CD solution with high customization,” Philipp says. There is one home for all projects, without dependencies on one another. With Jenkins, even small exploratory changes can impact the larger job. “With GitLab, you don’t have dependency between branches. So, if you’re trying something new for your CI, you can do it simply in your branch and the master branch will not be affected by the changes,” Philipp says.\n\nThe CI is low maintenance, which is a useful timesaver for a smaller team. “The CI provides us with really low maintenance time. So, usually we don’t have to care about our CI for a month or more,” Philipp says.\n\nTo learn more about adSoul’s migration to GitLab, watch Philipp’s talk from GitLab Commit London.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C5xfw0ydh2k\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[110,894,703,704,4441,873],{"slug":5836,"featured":6,"template":678},"adsoul-devops-transition-to-gitlab-ci","content:en-us:blog:adsoul-devops-transition-to-gitlab-ci.yml","Adsoul Devops Transition To Gitlab Ci","en-us/blog/adsoul-devops-transition-to-gitlab-ci.yml","en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"_path":5842,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5843,"content":5849,"config":5855,"_id":5857,"_type":16,"title":5858,"_source":17,"_file":5859,"_stem":5860,"_extension":20},"/en-us/blog/reviewer-roulette-one-year-on",{"title":5844,"description":5845,"ogTitle":5844,"ogDescription":5845,"noIndex":6,"ogImage":5846,"ogUrl":5847,"ogSiteName":692,"ogType":693,"canonicalUrls":5847,"schema":5848},"Reviewer Roulette: (Just about) one year on","Learn how Reviewer Roulette has evolved at GitLab over the last year.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672195/Blog/Hero%20Images/play-reviewer-roulette.jpg","https://about.gitlab.com/blog/reviewer-roulette-one-year-on","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Reviewer Roulette: (Just about) one year on\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nick Thomas\"}],\n        \"datePublished\": \"2019-10-23\",\n      }",{"title":5844,"description":5845,"authors":5850,"heroImage":5846,"date":5852,"body":5853,"category":14,"tags":5854},[5851],"Nick Thomas","2019-10-23","\nJust over a year ago, [Dennis Tang](/company/team/#dennis)\nintroduced us to [Reviewer Roulette](/blog/play-reviewer-roulette/).\nThis was a shiny new tool designed to help us find reviewers for our code.\nAt the time, our engineering department had around 150 people in it. At GitLab,\n[all our engineers are reviewers](/handbook/engineering/workflow/code-review/#reviewer),\nbut reviews were being unevenly distributed across them.\n\nA year on, and with more than 380 people in engineering available to review,\nwe're still using a form of Reviewer Roulette – but its implementation, and how\nwe interact with it, have changed significantly. 
So, what's changed, and what's\nstayed the same?\n\n## The good\n\nFirst off, roulette works really well. Code reviews can be time-consuming, and\nthey're a major part of quality control at GitLab, so it's crucial that we\ndistribute the load – research shows that [review quality nosedives](https://smartbear.com/learn/code-review/best-practices-for-peer-code-review/)\nif you spend too much time doing it. It's even more\nimportant for our maintainers. We try to maintain a ratio of engineers to maintainers of around\n4:1, but if half of the reviews go to a quarter of the maintainers, some will\nexperience it as 6:1, while others will experience it as 2:1.\n\nAlso, people could become familiar with certain reviewers and maintainers and\nhabitually assign their work to the same people. This meant that people who had\nbeen maintainers for longer tended to get more reviews. Without the\nrandomization effect of Reviewer Roulette, this led to the creation of knowledge\nsilos, where knowledge about a particular subject would be concentrated in a few\nindividuals, rather than being spread across the organization.\n\nRoulette solved this for us with almost no cognitive load, and it scaled\neffortlessly as our engineering team expanded significantly. Sometimes, I first\nlearned that someone new had joined the company through a review suggestion. The\nnumber and type of reviews a merge request needed were also increasing – I might\nneed to find a reviewer and maintainer for frontend, backend, QA, database,\ndocumentation, and UX concerns before merging. It's a lot to keep track of\nmanually!\n\n## The bad\n\nDespite the advantages of Reviewer Roulette, I was using it only inconsistently after a\nfew months, and never actually contributed any improvements to the code. The\nintegration with Slack didn't fit my workflow very well because a chat channel\nis the last place I want to be when working on code! I like to treat Slack as\nthe [informal, asynchronous](/handbook/communication/#slack) communication\nchannel it is designed to be, but it is too easy to be sidetracked by ongoing\nconversations when popping in to get a reviewer recommendation.\n\nThen, we began running into deployment problems, and sometimes Reviewer Roulette\njust wasn't available at all. It only took a few failed attempts before I fell\nout of the habit of trying to use it, and we never did get around to making the\ndeployment work with Auto DevOps.\n\nIt turns out that I wasn't the only one having trouble with this iteration of Reviewer Roulette – we found\nthat backend reviews were [very unevenly distributed](https://gitlab.com/gitlab-org/gitlab-foss/issues/53119#note_111796691). Reviewer Roulette wasn't being used widely enough across GitLab for us to experience\nall the benefits, and as we geared up to add many more maintainers, fixing\nthis tool became very important.\n\n## The fix\n\nIn the interim, [Yorick Peterse](/company/team/#yorickpeterse), a staff backend engineer on Delivery, introduced\n[Danger bot](https://github.com/danger/danger) into GitLab's CI pipeline and\nused it to enforce a set of fine-grained coding standards that we couldn't quite express\nwith Rubocop.\n\nThe new bot would leave polite messages on our MRs, asking us to write\n[better commit messages](https://docs.gitlab.com/ee/development/contributing/merge_request_workflow.html#commit-messages-guidelines),\nor to seek database review if we'd changed any files in `db/`. 
That last part got me\nthinking: Why couldn't the Danger bot pick a potential database reviewer for us at the same\ntime? What was stopping it from detecting backend, frontend, or documentation\nchanges, and using Reviewer Roulette to choose reviewers and maintainers right there in\nthe merge request?\n\n[Very little, it turned out](https://gitlab.com/gitlab-org/gitlab/merge_requests/13506#note_175449376):\n\n![Reviewer Roulette in Action](https://about.gitlab.com/images/blogimages/roulette-review.jpg)\n\nBy making Reviewer Roulette happen automatically in the merge request itself, we\nremoved all the barriers that were preventing us from using the tool. I no longer had to be\non Slack to find a reviewer; instead, the list was right there in the merge request as\nI went to change the assignee. Danger was guaranteed to run on every pipeline –\nthere were no deployments or environments to worry about, and if it broke,\nfixing it was automatically [high priority](/handbook/engineering/workflow/#broken-master).\n\nContributing changes also became much easier – the code was right there in the\nGitLab repository, and changes took effect immediately (again, no deployments!).\n\n## What's next?\n\nThe ChatOps version of Reviewer Roulette needed access to GitLab's Slack\nworkspace, so it wasn't available to most of our community contributors\nbeyond the [core team](/handbook/marketing/developer-relations/core-team/). Moving Reviewer Roulette to Danger doesn't really solve this\nproblem – it doesn't work well on forks of the `gitlab-org/gitlab` project, so\ncommunity contributors don't benefit. This problem is something I'd really\nlike to fix in the future, not least because I work on a fork of GitLab\nday-to-day as well.\n\nDanger is a good tool but it does have [some limitations](https://docs.gitlab.com/ee/development/dangerbot.html) –\nin particular, [`danger local`](https://danger.systems/guides/troubleshooting.html#i-want-to-work-locally-on-my-dangerfile)\ndoesn't work for GitLab. This slows down development, since you have to commit\nand push changes to your merge request before you can see the effects.\n\nAnother big problem is that this most recent iteration of Reviewer Roulette only\nworks for the `gitlab` project. None of our satellite projects – `gitaly`,\n`gitlab-workhorse`, `gitlab-pages`, `gitlab-runner`, etc. – can use this\nversion of Reviewer Roulette. Similarly, [users of GitLab haven't\nbenefited from the work we've been doing on Roulette](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/24938#note_141874188).\nIdeally, we would have built this as a feature within GitLab itself, so everyone\ncould benefit from the tool.\n\nBy building Reviewer Roulette in Danger, we've been able to prototype and rapidly iterate\nto a solution that is working very well for the `gitlab` project. The next steps\nare to turn Reviewer Roulette [into a feature](https://gitlab.com/groups/gitlab-org/-/epics/1823) that all users of GitLab can benefit from, perhaps by leveraging the [CODEOWNERS file](https://gitlab.com/gitlab-org/gitlab/issues/12137).\n\nDo you have any ideas on how we can better integrate Reviewer Roulette into GitLab? 
Let us know by commenting [in the epic](https://gitlab.com/groups/gitlab-org/-/epics/1823)\nor by opening a new issue!\n\n[Cover photo](https://unsplash.com/photos/w6OniVDCfn0) by Krissia Cruz on [Unsplash](https://unsplash.com/search/photos/roulette).\n{: .note}\n",[1084,727,894],{"slug":5856,"featured":6,"template":678},"reviewer-roulette-one-year-on","content:en-us:blog:reviewer-roulette-one-year-on.yml","Reviewer Roulette One Year On","en-us/blog/reviewer-roulette-one-year-on.yml","en-us/blog/reviewer-roulette-one-year-on",{"_path":5862,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5863,"content":5868,"config":5874,"_id":5876,"_type":16,"title":5877,"_source":17,"_file":5878,"_stem":5879,"_extension":20},"/en-us/blog/database-case-study-store-and-update-namespace-statistics",{"title":5864,"description":5865,"ogTitle":5864,"ogDescription":5865,"noIndex":6,"ogImage":3194,"ogUrl":5866,"ogSiteName":692,"ogType":693,"canonicalUrls":5866,"schema":5867},"Store and update namespace statistics in a performant manner","Explore all the different engineering approaches to store and update the namespace statistics in a performant manner.","https://about.gitlab.com/blog/database-case-study-store-and-update-namespace-statistics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Store and update namespace statistics in a performant manner\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayra Cabrera\"}],\n        \"datePublished\": \"2019-10-14\",\n      }",{"title":5864,"description":5865,"authors":5869,"heroImage":3194,"date":5871,"body":5872,"category":14,"tags":5873},[5870],"Mayra Cabrera","2019-10-14","\nManaging storage space on large GitLab instances, such as GitLab.com, can be a challenge. At the moment, we only place limits on repositories, with no restriction on most of the other items that can consume storage space: wikis, LFS objects, artifacts, and packages, to mention a few.\n\nWe want to provide a method for easily viewing the amount of storage consumed by a group and to allow easy management on GitLab.com through [storage and limits management for groups](https://gitlab.com/groups/gitlab-org/-/epics/886). But to do that we need a way to track the statistics of a namespace, whether it is a Group or a User namespace.\n\n## Proposal to track the statistics of a namespace\n\n1. Create a new ActiveRecord model to hold the namespaces' statistics in an aggregated form, for root namespaces only.\n2. Refresh the statistics in this model every time a project belonging to this namespace is changed.\n\nThe \"refresh\" part is the tricky one. Currently, we don't have a pattern for refreshing the namespace statistics every time a project belonging to the namespace is updated.\n\nWe refresh project statistics in the following way:\n\n1. We have a model called `ProjectStatistics`.\n2. The records on `ProjectStatistics` are updated through a [callback](https://gitlab.com/gitlab-org/gitlab-ce/blob/v12.2.0.pre/app/models/project.rb#L90) every time the project is saved.
\n3. The summary of those statistics per namespace is then retrieved by the [`Namespaces#with_statistics`](https://gitlab.com/gitlab-org/gitlab-ce/blob/v12.2.0.pre/app/models/namespace.rb#L70) scope.\n\nAnalyzing this query, we noticed that:\n\n- It takes up to `1.2` seconds for namespaces with over `15 000` projects.\n- Any attempt to run `EXPLAIN ANALYZE` results in query timeouts (15 seconds) when using our internal tooling.\n\nAdditionally, the callback to update the project statistics doesn't scale. It is currently one of the most [frequently run and expensive database queries](https://gitlab.com/gitlab-org/gitlab-ce/issues/62488) on GitLab.com. We can't add one more query to it, as that would increase the transaction's length.\n\nFor these reasons, we can't apply the same pattern to store\nand update the namespaces' statistics, as the `namespaces` table is one\nof the largest tables on GitLab.com. Therefore, we had to find a performant\nalternative.\n\n## Our Attempts\n\n### Attempt A: PostgreSQL materialized view\n\nUpdate the ActiveRecord model with a refresh strategy based on project routes and a [materialized view](https://www.postgresql.org/docs/9.6/rules-materializedviews.html):\n\n```sql\nSELECT split_part(\"rs\".path, '/', 1) as root_path,\n        COALESCE(SUM(ps.storage_size), 0) AS storage_size,\n        COALESCE(SUM(ps.repository_size), 0) AS repository_size,\n        COALESCE(SUM(ps.wiki_size), 0) AS wiki_size,\n        COALESCE(SUM(ps.lfs_objects_size), 0) AS lfs_objects_size,\n        COALESCE(SUM(ps.build_artifacts_size), 0) AS build_artifacts_size,\n        COALESCE(SUM(ps.packages_size), 0) AS packages_size\nFROM \"projects\"\n    INNER JOIN routes rs ON rs.source_id = projects.id AND rs.source_type = 'Project'\n    INNER JOIN project_statistics ps ON ps.project_id  = projects.id\nGROUP BY root_path\n```\n\nWe could then refresh the view with:\n\n```sql\nREFRESH MATERIALIZED VIEW root_namespace_storage_statistics;\n```\n\nWhile this would give us a single-query update, it has some downsides:\n\n- The query itself would not be fast, as it would need to update all the statistics every time it runs. The execution time would increase as the number of namespaces and projects grows.\n- Materialized view syntax differs between PostgreSQL and MySQL. At the time this feature was worked on, [GitLab still supported MySQL](/blog/removing-mysql-support/), which it no longer does.\n- Rails does not have native support for materialized views. 
We'd need a specialized gem to take care of managing the database views, which implies additional work.\n\n### Attempt B: An update through a CTE\n\nUpdate the ActiveRecord model with a refresh strategy through a [Common Table Expression](https://www.postgresql.org/docs/9.1/queries-with.html).\n\n```sql\nWITH refresh AS (\n  SELECT split_part(\"rs\".path, '/', 1) as root_path,\n        COALESCE(SUM(ps.storage_size), 0) AS storage_size,\n        COALESCE(SUM(ps.repository_size), 0) AS repository_size,\n        COALESCE(SUM(ps.wiki_size), 0) AS wiki_size,\n        COALESCE(SUM(ps.lfs_objects_size), 0) AS lfs_objects_size,\n        COALESCE(SUM(ps.build_artifacts_size), 0) AS build_artifacts_size,\n        COALESCE(SUM(ps.packages_size), 0) AS packages_size\n  FROM \"projects\"\n        INNER JOIN routes rs ON rs.source_id = projects.id AND rs.source_type = 'Project'\n        INNER JOIN project_statistics ps ON ps.project_id  = projects.id\n  GROUP BY root_path)\nUPDATE namespace_storage_statistics\nSET storage_size = refresh.storage_size,\n    repository_size = refresh.repository_size,\n    wiki_size = refresh.wiki_size,\n    lfs_objects_size = refresh.lfs_objects_size,\n    build_artifacts_size = refresh.build_artifacts_size,\n    packages_size  = refresh.packages_size\nFROM refresh\n    INNER JOIN routes rs ON rs.path = refresh.root_path AND rs.source_type = 'Namespace'\nWHERE namespace_storage_statistics.namespace_id = rs.source_id\n```\n\nUnlike Attempt A, the CTE can be limited to the namespace we care about instead of operating on all namespaces. The downside is that earlier versions of MySQL do not support Common Table Expressions.\n\n### Attempt C: Get rid of the model and store the statistics on Redis\n\nWe could get rid of the model that stores the statistics in aggregated form and instead use a Redis Set.\nThis would be the [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions) and the fastest one\nto implement, as GitLab already includes [Redis](https://docs.gitlab.com/ee/development/architecture.html#redis) as part of its architecture.\n\nThe downside of this approach is that Redis does not provide the same persistence/consistency guarantees as PostgreSQL,\nand the namespace statistics are information we can't afford to lose in the case of a Redis failure. Also, searching for\ninformation like the largest namespaces per repository size would be easier to do in PostgreSQL than in Redis.\n\n### Attempt D: Tag the root namespace and its child namespaces\n\nDirectly relate the root namespace to its child namespaces, so\nwhenever a child namespace is created, it's also tagged with the\nroot namespace ID:\n\n| id | root_id | parent_id\n|:---|:--------|:----------\n| 1  | 1       | NULL\n| 2  | 1       | 1\n| 3  | 1       | 2\n\nTo aggregate the statistics inside a namespace we'd execute something like:\n\n```sql\nSELECT COUNT(...)\nFROM projects\nWHERE namespace_id IN (\n  SELECT id\n  FROM namespaces\n  WHERE root_id = X\n)\n```\n\nEven though this approach would make aggregating much easier, it has some major downsides:\n\n- We'd have to migrate **all namespaces** by adding and filling a new column. Because of the size of the table, the time and cost involved would be significant. 
The [background migration will take approximately 153 hours](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/29772#note_182201607).\n- The background migration has to be shipped one release before we want to start using the new data, delaying the functionality by another milestone.\n\n### Attempt E: Update the namespace storage statistics asynchronously\n\nFor this approach, we continue using the incremental statistics updates we already have,\nbut we refresh them through Sidekiq jobs and in different SQL transactions:\n\n1. Create a second table (`namespace_aggregation_schedules`) with two columns, `id` and `namespace_id`.\n1. Whenever the statistics of a project change, insert a row into `namespace_aggregation_schedules`.\n   - We don't insert a new row if there's already one related to the root namespace (see the sketch after this list).\n   - Keeping in mind the length of the transaction that involves [updating `project_statistics`](https://gitlab.com/gitlab-org/gitlab-ce/issues/62488), the insertion should be done in a different transaction and through a Sidekiq job.\n1. After inserting the row, we schedule another worker to be executed asynchronously at two different moments:\n   - One enqueued for immediate execution and another one scheduled in `1.5` hours.\n   - We only schedule the jobs if we can obtain a `1.5h` lease on Redis on a key based on the root namespace ID.\n   - If we can't obtain the lease, it indicates there's another aggregation already in progress or scheduled within the next `1.5` hours.\n1. This worker will:\n   - Update the root namespace storage statistics by querying all the namespaces through a service.\n   - Delete the related `namespace_aggregation_schedules` row after the update.\n1. Another Sidekiq job is also included to traverse any remaining rows on the `namespace_aggregation_schedules` table and schedule jobs for every pending row.\n   - This job is scheduled with cron to run every night (UTC).
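\n\nThe \"insert only if absent\" behavior in step 2 maps naturally onto an upsert. In PostgreSQL terms, it comes down to something like this (an illustrative sketch, assuming a unique index on `namespace_id`):\n\n```sql\n-- Schedule an aggregation for root namespace 1, unless one is already\n-- pending; the unique index makes this safe under concurrent inserts.\nINSERT INTO namespace_aggregation_schedules (namespace_id)\nVALUES (1)\nON CONFLICT (namespace_id) DO NOTHING;\n```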
\n\n## Enabling the feature on GitLab.com\n\nGiven this is a performance improvement, we had to be very careful introducing this change to GitLab.com, which is why\nwe decided to release it under a [feature flag](https://docs.gitlab.com/ee/development/feature_flags/) and roll it out gradually:\n\n1. Enable it on our staging environment and measure the performance.\n2. Enable it on GitLab.com for the `gitlab-org` group at different periods and measure the performance.\n3. Enable it globally on GitLab.com at different periods and measure the performance.\n\nFinally, if no problems arise, we can be confident this change performs properly on GitLab.com and we can\nremove the feature flag.\n\n## Measuring the performance\n\nTo assess this approach, we monitored the [Sidekiq dashboards](https://dashboards.gitlab.com/d/9GOIu9Siz/sidekiq-stats?orgId=1) on Grafana to ensure jobs were being executed flawlessly and without using too much memory or CPU. In particular, we watched the \"Sidekiq queue size,\" \"Rate of running jobs,\" and \"Running jobs\" dashboards.\n\n### On staging\n\nThe feature was enabled globally on staging and the execution of the jobs was satisfactory, but there was barely any traffic against which to measure the impact of our changes:\n\n![Graph showing the queue size of the ScheduleAggregationWorker on staging](https://about.gitlab.com/images/blogimages/namespace_statistics/staging-1.png){: .shadow.medium.center}\n\n### Enabling root namespaces on GitLab.com\n\nOur results were different on GitLab.com. We first enabled it for the `gitlab-org` group and quickly started to observe more traffic:\n\n![Graph showing the queue size of the ScheduleAggregationWorker on GitLab.com](https://about.gitlab.com/images/blogimages/namespace_statistics/production-1.png){: .shadow.medium.center}\n\n![Graph showing the running jobs of the ScheduleAggregationWorker on GitLab.com](https://about.gitlab.com/images/blogimages/namespace_statistics/production-2.png){: .shadow.medium.center}\n\nOnce we enabled the feature flag globally, the rate of running jobs increased considerably:\n\n![Graph showing the rate of running jobs of the ScheduleAggregationWorker on GitLab.com](https://about.gitlab.com/images/blogimages/namespace_statistics/production-3.png){: .shadow.medium.center}\n\n![Graph showing the rate of running jobs of the RootStatisticsWorker on GitLab.com](https://about.gitlab.com/images/blogimages/namespace_statistics/production-4.png){: .shadow.medium.center}\n\n## Root namespaces on GitLab.com today\n\nWe currently have nearly 400,000 statistics stored for root namespaces on GitLab.com, and they are updated at a high pace.\nBeing able to efficiently fetch those statistics makes it easy to track the largest repositories and namespaces on an instance\nand starts paving the way to enforcing storage limits for groups on GitLab.com.\n\nLearn more about this use case by reading:\n\n- [The original issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/62214)\n- [Merge Request with the implementation](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/28996)\n- [Details of the performance measured against staging and production (GitLab.com)](https://gitlab.com/gitlab-org/gitlab-ce/issues/64092)\n\nCover photo by [Bill Oxford](https://unsplash.com/@bill_oxford?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/engineering?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[915],{"slug":5875,"featured":6,"template":678},"database-case-study-store-and-update-namespace-statistics","content:en-us:blog:database-case-study-store-and-update-namespace-statistics.yml","Database Case Study Store And Update Namespace 
Statistics","en-us/blog/database-case-study-store-and-update-namespace-statistics.yml","en-us/blog/database-case-study-store-and-update-namespace-statistics",{"_path":5881,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5882,"content":5888,"config":5893,"_id":5895,"_type":16,"title":5896,"_source":17,"_file":5897,"_stem":5898,"_extension":20},"/en-us/blog/gitlab-journey-to-cicd",{"title":5883,"description":5884,"ogTitle":5883,"ogDescription":5884,"noIndex":6,"ogImage":5885,"ogUrl":5886,"ogSiteName":692,"ogType":693,"canonicalUrls":5886,"schema":5887},"GitLab's unconventional journey to CI/CD and Kubernetes","How the Delivery team at GitLab used our existing resources to overhaul our system to make way for CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678397/Blog/Hero%20Images/raphael-biscaldi-cicd.jpg","https://about.gitlab.com/blog/gitlab-journey-to-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's unconventional journey to CI/CD and Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-10-03\",\n      }",{"title":5883,"description":5884,"authors":5889,"heroImage":5885,"date":5890,"body":5891,"category":14,"tags":5892},[3676],"2019-10-03","\nEngineering teams are under pressure to provide value in the form of new features, all while minimizing [cycle time](/blog/reduce-cycle-time/). Oftentimes the instinct is to adopt modern tooling to make that happen. Continuous integration and delivery (CI/CD) is baked into GitLab, our single application for the DevOps lifecycle, and we are undergoing a major migration to Kubernetes to speed up our cycle time even more. But our journey to CI/CD and eventually Kubernetes has been unconventional, as the [Delivery team](/handbook/engineering/infrastructure/team/delivery/) elected to stress our current system as we step into [continuous delivery](/topics/continuous-delivery/) on GitLab.com before migrating entirely over to Kubernetes.\n\n## Releases before CI/CD\n\nThe wider GitLab community and GitLab team members [averaged 55 commits per day between Aug. 7 and Sept. 27, 2019](https://gitlab.com/gitlab-org/gitlab-ee/-/graphs/master/charts) as they continually iterate on our product to build new features for our customers. But before we adopted continuous delivery, we had to institute feature freeze periods beginning on the 7th of each month. During this period, engineers would shift their focus from building new features to fixing bugs in preparation for the upcoming release, which always happens on the 22nd.\n\n The use of a specific defined deadline encouraged behavior that ultimately caused developers to focus more on the due date and not around accomplishing the work.\n\n\"... developers would really play around the 7th because they would think ‘Oh, I have time, the 7th is in seven days,’ and then on the 6th at midnight they would panic merge things,\" said [Marin Jankovski](/company/team/#marin), engineering manager for the Delivery team. 
\"Because they know that if they missed this deadline they will have to wait for the next month, and if they get it in under this deadline they have a good two weeks to fix any problems that happen.\"\n\nSince the conception of GitLab.com, the feature freeze was used as a stabilization period, Marin explained.\n\nSoon though, the demand for new features from new users was pushing us to escalate our development velocity on GitLab.com. The stabilization period slowed our cycle time and created a significant drag in our turnover time for bug fixes, regression, and feature shipping for users both on GitLab.com and self-managed customers.\n\n“In some cases (the feature freezes) would even cause platform instability due to the fact that highest priority fixes couldn't find its way into customer hands quick enough,” said Marin. “By moving to CD, we can get both features and bug fixes alike into the hands of our users much quicker.”\n\nBefore the [Delivery team was created to manage GitLab.com's transition to continuous delivery](/handbook/engineering/infrastructure/team/delivery/#top-level-responsibilities) – and eventually Kubernetes – we depended upon a [release manager](/blog/release-manager-the-invisible-hero/), a rotating position among developers, to prepare the release. The [release process was iterated on over a five-year period](/community/release-managers/) as the release managers created a knowledge base and some automation to make the release process work.\n\nBut this method was inefficient as the timing behind the deployment process and release preparations was unpredictable, taking between half a day to multiple days due to the [accumulation of manual tasks in the process](https://gitlab.com/gitlab-org/release/docs/blob/master/general/tooling.md).\n\n“The release manager would get a set task list to go through, a deadline by which the tasks should be completed and they would have to repeat these steps over again until the release is ready, but also stable on GitLab.com,” explained Marin. At the highest level overview, the release manager had to:\n\n*   Manually sync the various repositories that GitLab consists of\n*   Ensure that the correct versions are set in the manually created Git branches\n*   Once the release is tagged, manually deploy to GitLab.com environments for both non-production and production\n*   Verify that everything is operational and manually publish the packages for self-managed users\n\nDuring his [presentation on this topic at GitLab Commit Brooklyn](https://youtu.be/lD-cYylwOLg), Marin shared the results of a 2018 survey which revealed that in the 14-day period before a release, the Delivery team spent 60% of their time babysitting deploys, and another 26% of their time on manual or semi-manual tasks release tasks, such as writing the monthly release post.\n\n![Task breakdown before CI/CD](https://about.gitlab.com/images/blogimages/journey-to-cicd/release-task-spread.jpg){: .medium.center}\nResults of a 2018 survey showing how the Delivery team spent their time two weeks before a release, before continuous delivery.\n{: .note.text-center}\n\n\"If you take a look at the whole thing, in 14 days, in two weeks, my team did nothing but sit on the computer and watch, well, paint dry, I guess,\" said Marin.\n\nBut by tackling 86% of the pie (60% deploys + 26% of the release manual tasks), the Delivery team could solve a few problems:\n\n1.  No release delays\n1.  Repeatable and faster deploys to enable no downtime\n1.  
More time for our GitLab.com Kubernetes migration\n1.  More space to prepare the organization for continuous delivery\n\nAlthough CD is only on GitLab.com, our self-managed customers also benefit from our transition to CD. Now anything that isn't caught with CI testing is tested automatically and manually in environments before ever reaching GitLab.com. Anything that requires a fix that does reach GitLab.com can be fixed in a few hours, so the final release for self-managed customers won't include these particular issues.\n\n## Our unique approach to transitioning to CD and Kubernetes\n\nThe transition from using feature freezes to adopting CD on GitLab.com was inevitable as our feature set grew, and a team of engineers, led by Marin, was formed to oversee this transition: “The Delivery team has been formed with the sole purpose of moving the company to a CD model for GitLab.com but at the same time for migrating GitLab.com to the Kubernetes platform to enable easier scaling and even faster turnaround times.”\n\nMany companies in GitLab’s position would have started this journey to CI/CD and Kubernetes by first integrating the new technologies into their workflow, and amending the development process as they go. We opted for a different approach.\n\nThe migration to Kubernetes requires a shift in both production systems and the engineering mindset, explained Marin. Kubernetes offers some features that teams can easily leverage without any extra investment. But in order to derive the greatest value from the free features Kubernetes offers, there ought to be some existing CI/CD process already in place.\n\nThe Delivery team recognized that in order to smooth the transition to Kubernetes for continuous delivery, our engineers must already be working with a CI/CD mindset – this includes a strong focus on quality assessments (QA) and stricter feature planning. So the Delivery team went with the [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions) and used our existing tools to build a CD system and reorganize the application infrastructure of GitLab.com instead of first adopting new tooling and technologies for CD.\n\n“The idea was simple,” said Marin. “We [leverage the tools at our disposal](https://gitlab.com/gitlab-org/release/docs/blob/master/general/deploy/auto-deploy.md), automate most of the manual tasks and ‘stress test’ the whole static system. If the static system can withstand the test, we move toward a more dynamic test.”\n\nThere were two key benefits to taking this approach:\n\n**First**, any weaknesses in our application were exposed and stabilized by automating with CI, so our application is stronger and less brittle, making a complete migration to Kubernetes more likely to be a success.\n\n**Second**, by shifting the engineering team to the CD mindset, we created a cultural shift among the engineers at GitLab who were accustomed to weekly deploys and waiting up to a day to see the impact of their merge.\n\n> “The definition of ‘done’ for developers has changed since the adoption of CI/CD,” said Marin.\n\nBefore CI/CD, a change was “done” once the review was completed. This excluded deployments to the various environments, which took a considerable amount of time. 
Today, deployments ship within hours, so there is no reason not to confirm that a change is working in testing and production environments.\n\nThe adoption of review apps on Kubernetes allows developers to run QA checks in virtually real time, and the use of [feature flags](/blog/feature-flags-continuous-delivery/) for progressive delivery also helps to accelerate development.\n\n“Since the first step in CD, developers are required to react to any automated QA but also carry out another level of manual verification in both non-production and production environments. Additionally, developers can have their changes running in production within a day compared to multiple days (and weeks).”\n\nEveryone can run QA checks on their code more frequently with CD. Because code changes are shipped around the clock with our CI/CD system, developers now operate an on-call rotation to help with any outstanding issues that are happening live on GitLab.com since the \"incubation\" time is much shorter.\n\n## Our new method\n\nSince the adoption of a CI/CD system, 90% of the [release process is automated](https://gitlab.com/gitlab-org/release/tasks/issues/885) using the [CI features of GitLab](/direction/verify/continuous_integration/). The remaining 10% requires human intervention due to coordination between various stakeholders.\n\n“We are slowly reducing those 10% as well with the goal of having only approvals needed to publish a release,” said Marin. [In the current iteration, the CI/CD process operates as follows](/direction/ops/):\n\n*   CI automatically looks for specific labels in merged MRs, applied by code reviewers and developers.\n*   CI automatically syncs all required repositories, creates the required Git branches and tags, and sets the correct versions of the release we want to ship.\n*   When the builds complete, packages are automatically deployed to non-production environments.\n*   Automated QA tasks are executed and, if passing, the deployment is rolled out to a small subset of users in production.\n*   In parallel, developers do another level of manual QA to ensure that new features are functioning as expected.\n*   If a high severity issue is discovered during manual verification, the deployments are stopped.\n*   When the above is completed, a member of the Delivery team will trigger a rollout to all users on GitLab.com.\n*   The self-managed release is then created from the last known working deployment running on GitLab.com.
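\n\nAs an illustration only – this is not GitLab's actual release tooling, and the job and script names below are made up – the flow above maps onto a `.gitlab-ci.yml` skeleton like this:\n\n```yaml\nstages:\n  - build\n  - deploy-nonprod\n  - qa\n  - rollout\n\nbuild_packages:\n  stage: build\n  script: ./scripts/build-packages.sh   # hypothetical helper script\n\ndeploy_staging:\n  stage: deploy-nonprod\n  environment:\n    name: staging\n  script: ./scripts/deploy.sh staging\n\nautomated_qa:\n  stage: qa\n  script: ./scripts/run-qa.sh staging\n\nrollout_production:\n  stage: rollout\n  environment:\n    name: production\n  when: manual   # a Delivery team member triggers the full rollout\n  script: ./scripts/deploy.sh production\n```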
\n\nAs is true for any engineering team, scaling remains a challenge for us. But one of the biggest technical challenges is making sure there is enough QA coverage, which can be labor intensive for a product as big as GitLab.com. Another is making sure the monitoring and alerting are sufficient so the product isn’t operating solely based upon pre-set rules.\n\nThe second major challenge is the complexity of our GitLab.com system, and communicating the change in process across our engineering teams. “Dismantling more than five years of built-up process and habit is never easy,” said Marin.\n\n## The results\n\nGitLab is already benefitting from the shift to CI/CD in a number of ways.\n\nThe results of a new 2019 survey assessing how the Delivery team spends their time in the same 14-day period before the release show that today, 82% of the team's time is freed up to work on other important tasks.\n\n![Task breakdown since CI/CD](https://about.gitlab.com/images/blogimages/journey-to-cicd/chart.jpg){: .medium.center}\nThe results of a 2019 survey measuring the same two weeks before the release show the switch to CD has freed up valuable developer time.\n{: .note.text-center}\n\nBy automating manual tasks, the Delivery team was able to shift their focus toward changing the GitLab.com infrastructure to better support our development velocity and user traffic, as well as beginning the migration to Kubernetes.\n\n> \"And, did I mention, none of this is on Kubernetes. All of this is using our 'old' legacy system,\" said Marin to the GitLab Commit Brooklyn audience. \"But what happened with this is we bought ourselves time, so my team actually has time to work on the migration. But one of the biggest changes that happened was in the habits of the engineering organization.\"\n\nThe results since the shift have been significant. The Delivery team went from around seven deploys under the old system in May 2019 to 35 deploys on GitLab.com in August 2019, and is on track to surpass these numbers considerably now that they're shipping multiple deploys a day.\n\n“We have just completed the migration of our Registry service to Kubernetes and if you use [Container Registry on GitLab.com](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/70), all your requests are served from the Kubernetes platform,” said Marin. “Since GitLab is a multi-component system, we are continuing to isolate and migrate other services.”\n\nNew CI/CD features are included in each release. For example, in our 12.3 release, we [expanded the GitLab Container Registry to allow users to leverage CI/CD to build and push images/tags to their project](/releases/2019/09/22/gitlab-12-3-released/#remove-container-images-from-cicd), among other exciting new features.\n\n## Transitioning your system to continuous delivery?\n\nFor companies considering the transition to CD, Marin advised starting with what you’ve got.\n\n“From my perspective, waiting for migrating to a new platform is the real ‘enemy,’” said Marin. “Most systems can be altered in some ways to enable faster turnaround time without migrating to a fully new system. 
Speeding up the development/release cycle has multiplier return per engineer in that system and that frees up more time for migrations to new platforms, such as Kubernetes.”\n\nIf you’re curious about what’s up next, [check out this detailed summary of the exciting new CI/CD features](/blog/a-look-ahead-for-gitlab-cicd/) on track to be released in 12.4 and beyond.\n\n## Missed GitLab Commit Brooklyn?\n\nIf you missed Marin's presentation on the prequel to Kubernetes, watch the entire video below and catch us in Europe at [GitLab Commit London on October 9](/events/commit/)!\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/lD-cYylwOLg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n[Cover Photo](https://unsplash.com/photos/rE3kbKmLmhE) by [Raphaël Biscaldi](https://unsplash.com/@les_photos_de_raph?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/journey?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[110,915],{"slug":5894,"featured":6,"template":678},"gitlab-journey-to-cicd","content:en-us:blog:gitlab-journey-to-cicd.yml","Gitlab Journey To Cicd","en-us/blog/gitlab-journey-to-cicd.yml","en-us/blog/gitlab-journey-to-cicd",{"_path":5900,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5901,"content":5907,"config":5912,"_id":5914,"_type":16,"title":5915,"_source":17,"_file":5916,"_stem":5917,"_extension":20},"/en-us/blog/why-we-chose-echarts",{"title":5902,"description":5903,"ogTitle":5902,"ogDescription":5903,"noIndex":6,"ogImage":5904,"ogUrl":5905,"ogSiteName":692,"ogType":693,"canonicalUrls":5905,"schema":5906},"Why we chose ECharts for data visualizations","Learn why GitLab switched from D3.js to ECharts as our library of choice for rendering data visualizations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666699/Blog/Hero%20Images/banner.jpg","https://about.gitlab.com/blog/why-we-chose-echarts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we chose ECharts for data visualizations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Clement Ho\"}],\n        \"datePublished\": \"2019-09-30\",\n      }",{"title":5902,"description":5903,"authors":5908,"heroImage":5904,"date":5909,"body":5910,"category":14,"tags":5911},[4965],"2019-09-30","\nAs GitLab continues to grow in depth and breadth across the [DevOps lifecycle](/topics/devops/), the use of charts and data visualizations has increased in frequency and complexity. Throughout the life of GitLab as a project, we've used multiple libraries to render beautiful charts. As the number of different libraries increased along with our charting requirements, we decided it was time to start unifying our charting libraries to help us move quickly.\n\nAt first, we wanted to unify our charts using D3.js but this was difficult because D3.js isn't a charting library. In their own words: \"D3.js is a JavaScript library for manipulating documents based on data,\" meaning it is a low level visualization tool. D3.js is powerful but it has a big learning curve. Our team did not have the time to develop the expertise without impacting our product development velocity. 
We also knew we had an ambitious hiring plan, and we would be adding time to our onboarding process by using D3.js.\n\nThe frontend team set out to investigate different charting libraries that we could use to gain more velocity. The library didn't have to do everything we needed, but it had to get us most of the way there. We investigated many libraries including ECharts, Britecharts, and Plotly as potential options. In the end, ECharts was the clear winner for us. Here's why:\n\n## ECharts' robust yet flexible chart types\nOn the Monitor stage frontend team, we have the [ambitious goal of replacing well-known monitoring tools like DataDog and Grafana](/direction/monitor/). It was absolutely critical that our charting library had enough flexibility for us to create our own custom charts, but it was also important that the library had existing charts so that we didn’t have to create every chart from scratch for the sake of development velocity.\n\nECharts has an [incredible showcase](https://echarts.apache.org/examples/en/) of the adaptability of their charts. This was a great starting point for us. We tested out styling ECharts to match our design system to determine how adaptable it was, and we were very satisfied with the results.\n\n![design](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/design.png)\n*Design spec for future GitLab charts.*\n\n![implementation](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/implementation.png)\n*Evaluation implementation using ECharts.*\n\n## ECharts performance\nWhen we were evaluating ECharts, we took one of our most complex user interactions for charts to benchmark the performance of the charting library. Although ECharts wasn’t perfect, it fared better than the alternatives. Below are some gifs recorded from changing the chart values in our [evaluation project](https://gitlab.com/adriel/echarts-proof-of-concept). As you can see, performance does decrease as the data points increase, but it is still usable, and it is unlikely we would have that many points in such a small chart.\n\n![10 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/10-points.gif)\n*Linked chart with 10 values.*\n\n![100 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/100-points.gif)\n*Linked chart with 100 values.*\n\n![1000 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/1000-points.gif)\n*Linked chart with 1000 values.*\n\n![4000 values](https://about.gitlab.com/images/blogimages/why-we-chose-echarts/4000-points.gif)\n*Linked chart with 4000 values.*\n\n## Growing ecosystem\n\nECharts isn’t perfect, but it has [improved over time](https://incubator.apache.org/projects/echarts.html). It started off as an [open source project from Baidu](https://whimsy.apache.org/board/minutes/ECharts.html) but is still going through the process of being incubated into the Apache Software Foundation. The [majority of ECharts users still seem to be based in China](https://echarts.apache.org/en/committers.html), meaning the developer community and corresponding documentation are primarily in Chinese. Despite some language barriers, the ECharts community does seem to be growing more internationally. 
We’ve come across a variety of companies from the United States and Mexico who are either evaluating or using ECharts internally.\n\nThe Podling Project Management Committee (PPMC) of ECharts, which is their core team in GitLab terms, has also been very welcoming and energetic about growing the ecosystem. As we decided on ECharts and began developing new charts and replacing old charts, we’ve been able to build a partnership with the company. They have been very kind to meet with us online every month to help answer questions and to guide us in using their library effectively. This has been extremely helpful. For example, during one of our meetings, Shuang Su gave us a brief walkthrough of the codebase and its architecture.\n\n## Where we are today with ECharts\n\nWe introduced [ECharts to the GitLab codebase in 11.6](https://gitlab.com/gitlab-org/gitlab-ce/issues/53147) and through ECharts have been building new chart types into our component library at a faster rate than ever before. We started with updating the charts in just our Monitor stage but have since introduced charts into the [Secure](https://gitlab.com/gitlab-org/gitlab-ee/issues/6954) and [Manage](https://gitlab.com/gitlab-org/gitlab-ee/issues/12079) stages.\n\nDepending on your use case, Apache ECharts could be a good fit for you too. For our team, ECharts has without a doubt increased our product development velocity compared with what it was with D3.js.\n\n| Old chart in D3.js | New chart in ECharts |\n|",[915,4300,1979],{"slug":5913,"featured":6,"template":678},"why-we-chose-echarts","content:en-us:blog:why-we-chose-echarts.yml","Why We Chose Echarts","en-us/blog/why-we-chose-echarts.yml","en-us/blog/why-we-chose-echarts",{"_path":5919,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5920,"content":5926,"config":5931,"_id":5933,"_type":16,"title":5934,"_source":17,"_file":5935,"_stem":5936,"_extension":20},"/en-us/blog/building-a-cicd-pipeline-in-20-mins",{"title":5921,"description":5922,"ogTitle":5921,"ogDescription":5922,"noIndex":6,"ogImage":5923,"ogUrl":5924,"ogSiteName":692,"ogType":693,"canonicalUrls":5924,"schema":5925},"How to build a CI/CD pipeline in 20 minutes or less","Deploying your pipeline to Kubernetes is just a 'git push' away using GitLab's Auto DevOps feature.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666903/Blog/Hero%20Images/pipeline.jpg","https://about.gitlab.com/blog/building-a-cicd-pipeline-in-20-mins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build a CI/CD pipeline in 20 minutes or less\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-09-26\",\n      }",{"title":5921,"description":5922,"authors":5927,"heroImage":5923,"date":5928,"body":5929,"category":14,"tags":5930},[3676],"2019-09-26","\nIn software development, time really is money. GitLab users know that by using our [Auto DevOps functionality](https://docs.gitlab.com/ee/topics/autodevops/), you can move from code to production in just two simple steps.\n\n[Eddie Zaneski](https://gitlab.com/eddiezane) of Digital Ocean joined us in Brooklyn at [GitLab Commit, our inaugural user conference](/blog/wrapping-up-commit/). 
In an informative and light-hearted talk, Eddie demonstrated how to build and deploy a [CI/CD pipeline](/topics/ci-cd/) to a Kubernetes cluster from scratch or by using GitLab’s [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) tooling in less than 20 minutes.\n\nIn the demo, Eddie and his co-founder were really wingin’ it as they built an app for their “startup,” the Screaming Chicken Club.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Massive shoutout to \u003Ca href=\"https://twitter.com/kamaln7?ref_src=twsrc%5Etfw\">@kamaln7\u003C/a> for building \u003Ca href=\"https://t.co/kke5hc2FC8\">https://t.co/kke5hc2FC8\u003C/a> and lending it to me for \u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a>\u003C/p>&mdash; Eddie Zaneski (@eddiezane) \u003Ca href=\"https://twitter.com/eddiezane/status/1174044146002288640?ref_src=twsrc%5Etfw\">September 17, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n“I'm trying to raise money right now and VCs are caring about my tech,” said Eddie of his hypothetical start-up. “An easy way to score credit with VCs is by having a super secure and well-thought-out DevOps pipeline, and that's where GitLab really comes into play here.”\n\n[Auto DevOps](/topics/devops/) is an out-of-the-box solution that helps move your code into production faster by automating the complex components of building a CI/CD pipeline, such as: “Building your application into a container; checking it for vulnerabilities; checking it for dependencies; checking it for licenses; deploying that to a Kubernetes cluster; setting up host names, DNS, and TLS certs; automatically renewing them for you; and doing performance testing.”\n\nSo where do you start?\n\n## Spin up your Kubernetes cluster\n\nGitLab has an airtight integration with Kubernetes that makes it possible to [deploy software from GitLab’s CI/CD pipeline to Kubernetes](/solutions/kubernetes/) by using Auto DevOps or by building the pipeline yourself. Either way, the first step will be to [configure a new Kubernetes cluster to deploy your application](https://docs.gitlab.com/ee/user/project/clusters/index.html).\n\nIt’s really as simple as going to the left-hand sidebar in GitLab and clicking Operations > Kubernetes > Add a cluster. This process works for [GCP or GKE users](https://docs.gitlab.com/ee/user/project/clusters/index.html#add-new-gke-cluster), as well as those that are not on Google Cloud or are using an on-prem solution. In the demo, Eddie used Digital Ocean’s managed Kubernetes service to create the cluster, select the data center, and pick the size of the node. Eddie estimated this process would take anywhere from three to five minutes.\n\nThe next step is to integrate the Kubernetes cluster into the project, which requires a number of manual tasks, including grabbing the URL for the Kubernetes API server, creating a service account and binding it to the cluster-admin role, and grabbing the service token that’s generated.
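\n\nFor reference, the service account portion of that manual setup boils down to applying a couple of small manifests along these lines – a sketch following the pattern in GitLab's cluster-integration docs, with example names:\n\n```yaml\n# A service account for GitLab, bound to the cluster-admin role\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: gitlab\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: gitlab-admin\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: cluster-admin\nsubjects:\n  - kind: ServiceAccount\n    name: gitlab\n    namespace: kube-system\n```\n\n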
In the spirit of innovative shortcuts, Eddie created a [kubectl plugin](https://gitlab.com/eddiezane/kubectl-gitlab_bootstrap) that makes it even easier to add the Kubernetes cluster to the associated GitLab project.\n\n“This is actually going to automatically bootstrap a Kubernetes cluster into your GitLab project, create all the service accounts, make all the GitLab API requests, and take care of everything under the hood.” Thanks, Eddie!\n\nNext, just grab the GitLab project ID, and run:\n\n`kubectl gitlab-bootstrap gitlab-project-id`\n\nThe result is a URL. Follow the URL to see more about the Kubernetes cluster in your GitLab project.\n\n## GitLab-managed applications make your life easier\n\nOnce you’re there, you’ll see a list of [GitLab-managed applications](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html). These apps can be installed in just one click to help manage your new Kubernetes cluster.\n\n1. [Helm](https://docs.gitlab.com/ee/update/removals.html): Install Helm first, because it is the package manager for Kubernetes and is required to install the other applications.\n2. [Ingress](https://docs.gitlab.com/ee/update/removals.html): Once Helm is installed, you can install the [Ingress controller](https://docs.gitlab.com/ee/update/removals.html), which will handle all the routing and mapping within the cluster and will create a load balancer behind the scenes. **Copy the IP address that’s displayed; you’ll need it later.**\n3. [Prometheus](https://docs.gitlab.com/ee/update/removals.html): An open source tool that monitors your deployed applications.\n4. [Cert-Manager](https://docs.gitlab.com/ee/update/removals.html): This will handle all the certificates and make sure everything is up to date.\n5. [GitLab Runner](https://docs.gitlab.com/ee/update/removals.html): Lets you run your GitLab CI/CD on your own host, or within the Kubernetes cluster.\n\nThe superstar of the bunch is GitLab Runner, the open source project that is used to run your CI/CD jobs and send the results back to GitLab.\n\n## Launch Auto DevOps with the click of a button\n\nOnce you’ve created your Kubernetes cluster and installed the required applications, [launch the Auto DevOps process with the click of a button](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html), literally.\n\n![Enable Auto DevOps](https://about.gitlab.com/images/blogimages/guide_enable_autodevops.jpg){: .shadow.medium.center}\n\nBy enabling Auto DevOps and selecting your deployment strategy (here is where you need the Ingress IP address), you kick off the CI/CD pipeline.\n\n## Or launch your own Auto DevOps process\n\nDon’t want to use our out-of-the-box Auto DevOps feature? You don’t have to. The good news is the underlying source code is available to you for each component of the deployment process, making it easy for you to parse out what jobs you'd like to run.\n\n“The great thing about GitLab being open source is nothing is magic, right? All this stuff is source code that we can all go look up and read,” says Eddie.\n\nThe source code for the entire out-of-the-box Auto DevOps process lives in [one YAML file](https://gitlab.com/gitlab-org/gitlab-foss/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml) in the GitLab repository.
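\n\nIf you'd rather adopt that template wholesale and then prune, a single `include` in your own `.gitlab-ci.yml` pulls it in – a minimal sketch using GitLab's standard `include: template:` syntax:\n\n```yaml\n# .gitlab-ci.yml – start from the stock Auto DevOps pipeline,\n# then override or disable individual jobs in this file as needed\ninclude:\n  - template: Auto-DevOps.gitlab-ci.yml\n```\n\n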
GitLab users are able to separately run jobs for each stage in the Auto DevOps process, from build to cleanup, simply by copy/pasting the [underlying source code](/solutions/source-code-management/) into a properly configured terminal.\n\nThe individual templates and components for the important jobs in each Auto DevOps stage are included in the YAML file. You can select which components you’d like to use. Note that nothing needs to be imported, because it all comes with your GitLab install.\n\nIn the demo, Eddie ran the jobs for the build and deploy stages as examples.\n\nRemember to return to the load balancer and grab the IP address Ingress created to configure your DNS, `git push`, then, voilà! Your CI/CD pipeline is running.\n\n## A peek inside the pipeline\n\nDuring the demo Eddie went behind the scenes to explain what was happening inside the pipelines for the build and deploy jobs he started.\n\n### Build\n\n“It's going to take care of a lot of stuff behind the hood for us,” said Eddie. The pipeline uses Docker to build containers inside Docker, and it logs in to the project’s container registry.\n\n“So GitLab automatically provides you with a container registry for your project,” said Eddie. “It's going to substitute in a whole bunch of environment variables and handles all the logins and generates the token, and all that. So we don't actually have to think about anything.”\n\nNext, the Docker base image loads. Eddie went into more detail about how to write up the Docker setup, but the GitLab build component can automatically figure out the type of project you’re running and generate a Dockerfile with best practices to build the container.\n\n“So my project is building, compiling, pushing up my layers to the container registry, and then my build job should finish real quick and then my deploy job is going to kick off,” explained Eddie.\n\n### Deploy\n\nThe deploy job kicks off by spinning up a Helm chart that automatically fills the required information, such as the container ID, the host name, namespace, etc., into the template. Then it creates the Ingress resource and deploys the application.\n\n## Put your CI/CD pipelines on autopilot with GitLab and Kubernetes\n\nIn just a few minutes, Eddie was able to demonstrate two different ways to build a CI/CD pipeline by using GitLab and Kubernetes. While our Auto DevOps feature makes it so you don’t have to create a bunch of YAMLs from scratch (because, let’s face it, if you’re running Kubernetes you’re already running a ton of YAMLs), our open source Auto DevOps process makes it possible to pick and choose which components or jobs you’d like to run.\n\nWatch the entire video from GitLab Commit Brooklyn to see Eddie run a **third** CI/CD pipeline during his 17-minute talk.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/-shvwiBwFVI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nLike what you see? 
[Join us in London](/events/commit/) on October 9 for our second GitLab Commit event with all new talks!\n",[1002,873],{"slug":5932,"featured":6,"template":678},"building-a-cicd-pipeline-in-20-mins","content:en-us:blog:building-a-cicd-pipeline-in-20-mins.yml","Building A Cicd Pipeline In 20 Mins","en-us/blog/building-a-cicd-pipeline-in-20-mins.yml","en-us/blog/building-a-cicd-pipeline-in-20-mins",{"_path":5938,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5939,"content":5945,"config":5950,"_id":5952,"_type":16,"title":5953,"_source":17,"_file":5954,"_stem":5955,"_extension":20},"/en-us/blog/why-we-created-the-gitlab-memory-team",{"title":5940,"description":5941,"ogTitle":5940,"ogDescription":5941,"noIndex":6,"ogImage":5942,"ogUrl":5943,"ogSiteName":692,"ogType":693,"canonicalUrls":5943,"schema":5944},"Why we created a Memory team at GitLab","GitLab has a memory problem, so we created a specialized team to fix it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678549/Blog/Hero%20Images/memory_team_arie-wubben.jpg","https://about.gitlab.com/blog/why-we-created-the-gitlab-memory-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we created a Memory team at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-09-13\",\n      }",{"title":5940,"description":5941,"authors":5946,"heroImage":5942,"date":5947,"body":5948,"category":14,"tags":5949},[3676],"2019-09-13","\nGitLab is an [all-in-one DevOps solution](/topics/devops/) with a growing feature set. But as more features are added to the application, more memory is required. Some users have reportedly elected to migrate to other tools because the memory footprint required to run a minimum GitLab instance was exorbitant:\n\n> “GitLab is great and I have used it for years but I recently switched to Gogs for self-hosted repositories because it is much faster, easier to set up, and walk in a park to maintain. It doesn't have all the features (bloat) that GitLab has but it can probably satisfy >95% of Git users.” – [Jnr on HackerNews](https://news.ycombinator.com/item?id=19227935)\n\n> “If GitLab grows any more features I'll be moving away simply to ensure confidence that I understand my own infrastructure in the limited time I have to maintain it. It's the weirdest kind of success problem to have, but the truth is if it wasn't such a pain to make the move, I'd have transitioned away from GitLab six months ago.” – [Sir_Substance on HackerNews](https://news.ycombinator.com/item?id=19230557)\n\n## Step 1: Establish priorities to solve our memory problem\n\nWe created the [GitLab Memory team](/handbook/engineering/development/enablement/data_stores/application_performance/) to tackle this performance challenge. The aim of the Memory team is to [reduce the minimum instance for GitLab from 8GB](https://gitlab.com/gitlab-org/gitlab-ce/commit/0cd5d968038d6d64d95add0bbe3d63d8fcfdc23b) to 1GB of RAM. 
By reducing the memory required to run GitLab to 1GB, [our application can run anywhere](https://gitlab.com/groups/gitlab-org/-/epics/448), even on inexpensive commodity computers like an unaltered [Raspberry Pi 3 Model B+](https://www.raspberrypi.org/products/raspberry-pi-3-model-b-plus/).\n\nThere is no quick fix for reducing GitLab’s memory footprint, but the team has started by investigating memory and performance bottlenecks, gathering data, and prioritizing activities for the next three to four months based on these results.\n\n“We know we have memory issues to address, but we need more data to determine the source, the impact and how to best approach the problem,” says [Craig Gomes](/company/team/#craiggomes), memory engineering manager.\n\n[Kamil Trzciński](/company/team/#ayufanpl), distinguished engineer and memory specialist at GitLab, says the top three priorities for the Memory team fall into three distinct buckets:\n\n1. [Moving over to Puma](https://gitlab.com/groups/gitlab-org/-/epics/954)\n1. [Performing the low-level exercise of optimizing endpoints](https://gitlab.com/groups/gitlab-org/-/epics/448)\n1. [Improving our development practices](https://gitlab.com/groups/gitlab-org/-/epics/1415)\n\n### Migrating from Unicorn to Puma\n\nPreliminary research shows that the bulk of GitLab’s memory usage comes from running web application processes on Unicorn.\n\n“Each Web application process (Unicorn) can take 500 MB of RAM, and it can handle a single request at a time. The more users and traffic we need to support, the more processes and hence RAM we need,” says [Stan Hu](/company/team/#stanhu), engineering fellow at GitLab.\n\nOne of the first projects the Memory team is tackling is testing to see if migrating from Unicorn to Puma will reduce GitLab’s memory footprint. Both Unicorn and Puma are HTTP web servers for Rails applications, but unlike Unicorn, which forks a separate single-threaded worker process for each concurrent request, Puma is multi-threaded and does not require as much memory.\n\nThe Memory team has successfully [configured Puma to work on dev.gitlab.com](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/82) to test its functionality and measure its memory reduction. The next big project in this domain is to [enable Puma on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/954).\n\n### Dig deeper into what's causing memory issues for GitLab.com\n\nBefore GitLab is able to run on less memory, the team needs to fix the memory problems we already know about on GitLab.com. One of these problems is the memory killer on Sidekiq, the open source background processor.\n\n\"If a Sidekiq job runs, takes too much memory, and then gets killed, jobs in the queue will be retried indefinitely,\" says Stan. The team is working to fix this, along with other priority-one problems with memory usage in [project import](https://gitlab.com/gitlab-org/gitlab-ce/issues/59754) and [exports](https://gitlab.com/gitlab-org/gitlab-ce/issues/35389) in the 12.3 release.\n\n### Improve development practices around memory usage\n\nThe Memory team created a massive epic that aims to capture related [development work focusing on making improvements to internal dev practices around code complexity and memory usage](https://gitlab.com/groups/gitlab-org/-/epics/1415).\n\n\"The reason behind that is to enable everyone during development to understand the impact of introducing new changes to the application,\" says Kamil in the [epic](https://gitlab.com/groups/gitlab-org/-/epics/1415). 
Some of the projects they are working on for the 12.3 release include [testing more endpoints using typical GitLab user scenarios (e.g., commenting on an MR)](https://gitlab.com/gitlab-org/quality/performance/issues/34) and setting up a [performance monitoring solution across different environments](https://gitlab.com/gitlab-org/quality/performance/issues/37).\n\n## Step 2: Create a team to fix the memory problem\n\nWe need a specialized engineering team to assess the scope of the problem and identify solutions to reduce GitLab’s memory requirements.\n\n“Right now we have a very small team with two brand new team members,” says Craig. “The team is getting up to speed quickly and there is so much excitement about the potential of the team that more work keeps coming our way. It's a great challenge to have, and having more experienced engineers on the team will help us to achieve our goals.”\n\nThe current memory team is small but mighty. We have [Craig](/company/team/#craiggomes), the engineering manager, and three engineers on the permanent memory team: [Kamil](/company/team/#ayufanpl), [Qingyu Zhao](/company/team/#qzhaogitlab), and [Aleksei Lipniagov](/company/team/#alipniagov). The team works closely with senior product manager for distribution and memory, [Larissa Lane](/company/team/#ljlane). [We’re looking for more qualified people to join our team](https://handbook.gitlab.com/job-families/engineering/backend-engineer/#memory).\n\nThe Memory team is actively hiring engineers to help us enhance GitLab’s performance, but we have a high rejection rate because we require a specific, hard-to-find skill set. A [top priority for the Memory team is hiring at least one senior engineer in FY20-Q3](https://gitlab.com/gitlab-com/www-gitlab-com/issues/4885), which will allow us to take on a bigger workload as we move toward our goal of getting GitLab running on less than 1GB.\n\nFollow along with the Memory team by [subscribing to their channel on GitLab Unfiltered](https://www.youtube.com/playlist?list=PL05JrBw4t0Kq_5ZWIHYfbcAYjtXYcEZA3).\n\nCover photo by [Arie Wubben](https://unsplash.com/@condorito1953?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/airplane?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[915,704,1286],{"slug":5951,"featured":6,"template":678},"why-we-created-the-gitlab-memory-team","content:en-us:blog:why-we-created-the-gitlab-memory-team.yml","Why We Created The Gitlab Memory Team","en-us/blog/why-we-created-the-gitlab-memory-team.yml","en-us/blog/why-we-created-the-gitlab-memory-team",{"_path":5957,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5958,"content":5964,"config":5971,"_id":5973,"_type":16,"title":5974,"_source":17,"_file":5975,"_stem":5976,"_extension":20},"/en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests",{"title":5959,"description":5960,"ogTitle":5959,"ogDescription":5960,"noIndex":6,"ogImage":5961,"ogUrl":5962,"ogSiteName":692,"ogType":693,"canonicalUrls":5962,"schema":5963},"How to prevent broken master with merge trains & pipelines","Do you still run pipelines on source branches? 
Let's start running them on merge commits!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678366/Blog/Hero%20Images/merge-train.jpg","https://about.gitlab.com/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to avoid broken master with Pipelines for Merged Results and Merge Trains\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Shinya Maeda\"}],\n        \"datePublished\": \"2019-09-11\",\n      }",{"title":5965,"description":5960,"authors":5966,"heroImage":5961,"date":5968,"body":5969,"category":14,"tags":5970},"How to avoid broken master with Pipelines for Merged Results and Merge Trains",[5967],"Shinya Maeda","2019-09-11","\nBroken master. This can happen when CI pipelines run on the master branch (or default branch) but don't\npass all tests. A red cross mark is shown on the project's top page, signalling unstable source\ncode and eroding the trust of users. Broken master can also block\na continuous delivery/deployment pipeline in which deployment jobs\nare executed only after the test stage passes in master pipelines.\n\nAll maintainers want to avoid this critical state,\nbut how can we prevent it?\n\n## Let's look at how master is broken in the first place\n\nLet's say you're one of the maintainers of a project. It's a busy repository with hundreds of merges\nto master every day. A developer assigns a merge request (MR) to you. The MR has passed all of the tests in the CI pipelines,\nhas been reviewed thoroughly by code reviewers, all open discussions have been resolved, and the MR has been\napproved by the relevant [code owners](https://docs.gitlab.com/ee/user/project/codeowners/).\n\nYou would press the \"Merge\" button without a second thought, but how confident are you that\nthe pipeline running on the master branch after the merge will pass all tests again?\nIf your answer is \"It might break the master branch,\" then\nyou're right. This could happen, for example, if master has advanced by some\nnew commits, and one of them changed a lint rule. The MR in question\nstill contains an invalid coding style, but the latest pipeline on the MR passes,\nbecause the feature branch is based on an old version of master.\n\nEnter two new GitLab features: [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html)\nand [Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html).\nLet me show you how they work and how to enable them.\n\n## How to continually run CI pipelines on the merge commit\n\nLet's break down what went wrong in the scenario above. Even though the pipeline on the\nmerge request passed all the tests, it ran on a source (feature) branch\nwhich could be based on an outdated version of master. 
In such a case,\nthe result of the pipeline is considered _untrusted_, because there may be a huge difference\nbetween the eventual merge commit and the commit in question.\n\nAs a [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions), developers can continually rebase their MR\non the latest master, but this is annoying and inefficient, given the speed of\ngrowth of the master branch.\nIt causes a lot of friction between developers and maintainers, slowing down the development cycle.\n\nTo address this problem, we introduced [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html)\nin [GitLab 11.10](/releases/2019/04/22/gitlab-11-10-released/#pipelines-for-merged-results).\n\nSimply put, the main difference between pipelines for merged results and normal pipelines is that\n**pipelines run on merge commits, instead of source branches, before the actual merge happens**.\nThis merge commit is generated from the latest commits of the target and source branches and written to a temporary ref (`refs/merge-requests/:iid/merge`).\nTherefore, we can run a pipeline on it without interfering with master.\n\nHere is a sample workflow with the above scenario:\n\n1. A developer pushes a new commit to a merge request.\n1. GitLab creates a merge commit from the HEAD of the source branch and HEAD of the target branch.\n   This merge commit is written in `refs/merge-requests/:iid/merge` and does not change the commit history of the master branch.\n1. GitLab creates a pipeline on the merge commit, but this pipeline fails because the latest master changed a lint rule.\n1. A maintainer sees a failed pipeline in the merge request.\n\nAs you can see, the maintainer was able to hold off merging the dangerous MR\nbecause the latest pipeline on the MR didn't pass. The feature actually saved\nmaster from a broken state.\n\nAs a bonus, this workflow frees developers from continual\nrebasing of their merge requests.\nAll they need to do is develop features with [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html).\nGitLab automatically creates an expected merge commit and validates the merge request prior to\nan actual merge.\n\n### How to get started with Pipelines for Merged Results\n\nYou can [start using this feature](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html#enabling-pipelines-for-merged-results)\ntoday, with just two steps:\n\n1. Edit the `.gitlab-ci.yml` config file to enable [pipelines for merge requests / merge request pipelines](https://docs.gitlab.com/ee/ci/merge_request_pipelines/).\n1. Enable the \"Merge pipelines will try to validate the post-merge result prior to merging\" option at **Settings > General > Merge requests** in your project.
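\n\nFor the first step, it's often as simple as adding `only: merge_requests` to the jobs you want to run against the merge commit – a minimal sketch, where the job name and test command are placeholders:\n\n```yaml\ntest:\n  stage: test\n  script: bundle exec rspec   # placeholder test command\n  only:\n    - merge_requests\n```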
Enable the \"Merge pipelines will try to validate the post-merge result prior to merging\" option at **Settings > General > Merge requests** in your project.\n\n**Note:** If the configurations in your `.gitlab-ci.yml` file are too complex, you might stumble at the first point.\nWe're currently working on [improving the usability of pipelines for merge requests / merge request pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/60085).\nPlease leave your feedback in the issue if that's the case.\n\n## How to avoid race condition of concurrent merges\n\nWith [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html),\nwe can confidently say that MRs are continually tested against the latest master branch.\nHowever, what if multiple MRs have been merged at the same time?\nFor example:\n\n- There are two merge requests: MR-1 and MR-2. The latest pipelines have already passed in both MRs.\n- John (maintainer) and Cathy (maintainer) merge MR-1 and MR-2 at the same time, respectively.\n\nLater on, it turns out that MR-2 contains a coding offence which has just been introduced by MR-1.\nMaintainers hit merge without knowing that, and\nneedless to say, this will result in broken master. How can we handle this race condition properly?\n\nIn [GitLab 12.1](/releases/2019/07/22/gitlab-12-1-released/#parallel-execution-strategy-for-merge-trains), we introduced a new feature,\n[Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/).\nBasically, a Merge Train is a queueing system that allows you to avoid this kind\nof race condition.\nAll you need to do is add merge requests to the merge train, and it\nhandles the rest of the work for you.\nIt creates merge commits according\nto the sequence of merge requests and runs pipelines on the expected merge commits.\nFor example, John and Cathy could have avoided broken master with the following workflow:\n\n1. John and Cathy add MR-1 and MR-2 to their [Merge Train](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/), respectively.\n1. In MR-1, the Merge Train creates an expected merge commit from HEAD of the source branch and HEAD of the target branch.\n   It creates a pipeline on the merge commit.\n1. In MR-2, the Merge Train creates an expected merge commit from HEAD of the source branch and the expected merge commit of MR-1.\n   It creates a pipeline on the merge commit.\n1. The pipeline in MR-1 passes all tests and merged into master branch.\n1. The pipeline in MR-2 fails because it violates a lint check which was changed by MR-1. MR-2 is dropped from the Merge Train.\n1. Developer revisits MR-2, fixes the coding offence, and asks Cathy to add it to the Merge Train again.\n\nAs you can see, the Merge Train successfully rejected MR-2 before it could break the master\nbranch. With this workflow, maintainers can feel more confident when they\ndecide to merge something. Also, this doesn't slow down development lifecycle\nthat pipelines are built on optimistic assumption that, in the above case,\nthe pipeline in MR-1 and the pipeline in MR-2 **start almost simultaneously**.\nMR-2 builds a merge commit as if MR-1 has already been merged, so that maintainers\ndon't need to wait for long time until each pipeline finished. 
If one of the\npipelines fails, the problematic merge request is dropped from the merge train\nand the train is reconstructed without it.\n\n### How to get started with Merge Trains\n\nYou can [start using Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html#how-to-add-a-merge-request-to-a-merge-train)\ntoday, if you've already enabled [Pipelines for merged results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html#enabling-pipelines-for-merged-results). Simply click the [\"Start/Add merge train\" button](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html#how-to-add-a-merge-request-to-a-merge-train) in a merge request.\n\n## A quick demonstration of Merge Trains\n\nHere is a demonstration video that explains the advantage of the Merge Trains feature.\nIn this video, we simulate a common problem in a workflow without\nMerge Trains, and then resolve the problem by enabling a Merge Train.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/D4qCqXgZkHQ\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## Wrap up\n\nRunning pipelines on expected merge commits allows us to predict what will happen\nin the future and proactively avoid a broken master. It soothes the headache of\nrelease managers and gives maintainers and developers more confidence that their code\nis reliable enough to be merged and shipped. In addition, Merge Trains allow you\nto merge things safely without slowing down the development cycle.\n\nGive this advanced CI/CD feature a try today!\n\nFor more information, check out [the documentation on merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) and [pipelines for merge requests / merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\n\nCover image by [Dan Roizer](https://unsplash.com/@danny159) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[110,2932,727,749],{"slug":5972,"featured":6,"template":678},"how-to-avoid-broken-master-with-pipelines-for-merge-requests","content:en-us:blog:how-to-avoid-broken-master-with-pipelines-for-merge-requests.yml","How To Avoid Broken Master With Pipelines For Merge Requests","en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests.yml","en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests",{"_path":5978,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5979,"content":5985,"config":5990,"_id":5992,"_type":16,"title":5993,"_source":17,"_file":5994,"_stem":5995,"_extension":20},"/en-us/blog/building-build-images",{"title":5980,"description":5981,"ogTitle":5980,"ogDescription":5981,"noIndex":6,"ogImage":5982,"ogUrl":5983,"ogSiteName":692,"ogType":693,"canonicalUrls":5983,"schema":5984},"Getting [meta] with GitLab CI/CD: Building build images","Let's talk about building build images with GitLab CI/CD. 
The power of Docker as a build platform is unleashed when you get meta.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678567/Blog/Hero%20Images/building-blocks.jpg","https://about.gitlab.com/blog/building-build-images","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting [meta] with GitLab CI/CD: Building build images\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2019-08-28\",\n      }",{"title":5980,"description":5981,"authors":5986,"heroImage":5982,"date":5987,"body":5988,"category":14,"tags":5989},[2558],"2019-08-28","\n> An alternative title for this post could have been:\n>\n> I heard you liked Docker, so I put [dind](https://hub.docker.com/_/docker/).\n\n## Getting started\nIt should be clear by now that I love building stuff with GitLab CI/CD. From\n[DNS](https://medium.com/gitlab-magazine/ci-cd-all-the-things-pihole-625a0ceaf12)\nto [breakfast](/blog/introducing-auto-breakfast-from-gitlab/), GitLab CI/CD\ncovers a pretty wide range. However, past those \"fun\" use cases, I also like\nto share some ~~best~~ practices I have acquired during my years of using [GitLab\nCI/CD](/solutions/continuous-integration/), both for software and non-software projects alike.\n\nI crossed out \"best\" above because I don't really like the term \"best practices.\" It\nimplies that there is only one right answer to a given question – which is the\nopposite of the point of computer science. Sure, there are better and worse ways to\ndo something – but like many things in life, you have to find what works for\nyou. \"[The best camera is the one you have with you](https://www.amazon.com/Best-Camera-One-Thats-You/dp/0321684788)\"\ncomes to mind when building CI/CD for projects. Something that works is better than something that's pretty.\n\nBut enough of my digression. Let's get to the practice I wanted to share in this\npost: building build images as part of the build process. Yes, it is precisely as meta as it sounds.\n\n## Why?\n\nOften when building a particular project, you may have several unique build dependencies.\nIn many languages, package managers solve for the majority if not all of these\ndependencies – at least for build time (think [npm](https://www.npmjs.com), [RubyGems](https://rubygems.org/),\n[Maven](https://maven.apache.org/what-is-maven.html)). However, when we are building and\ndeploying (CI/**CD**, let's remember) from a machine that is not our own, that may not\nbe enough. There may be a few dependencies we might need from elsewhere.\n\nThe language libraries themselves are one such dependency – to build Java I'm going to need\nthe JDK or JRE. To build Node, I'll need... well, Node, etc. In a Docker-based environment,\nthose languages and dependencies typically have an official image on Docker\nHub ([JRE from Oracle](https://hub.docker.com/_/oracle-serverjre-8) or\n[Node from Node.js](https://hub.docker.com/_/node) for instance). Assume, however, that\nI may need a few other things not included in **either** those official Docker images or\nthe package manager I'm using. For instance, maybe I need a CLI tool for\ndeployment ([AWS](https://aws.amazon.com/cli/), [Heroku](https://devcenter.heroku.com/articles/heroku-cli),\n[Firebase](https://firebase.google.com/docs/cli), etc.). 
We also might need a testing\nframework or tool like [Selenium](https://www.seleniumhq.org) or\n[headless Chrome](https://developers.google.com/web/updates/2017/04/headless-chrome).\nOr I may need other tools for packaging, testing, or deployment.\n\nSometimes there is a Docker image on Docker Hub for these combinations – or some of\nthem – but not always a maintained version. One easy solution could be to\njust run the install of the tools before every job that needs them. This can\neven be \"automated\" using something like\nthe [before_script](https://docs.gitlab.com/ee/ci/yaml/#before_script-and-after_script) syntax.\nHowever, this adds time to our pipeline and seems inefficient: Is there a better way?\n\n## Enter the GitLab Docker registry\nGitLab is a single application for the entire [DevOps](/topics/devops/) lifecycle – it actually\nships out of the box with a built-in\n[Docker registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\nThis can be a useful tool when deploying code in a containerized environment. We can\nbuild our application into a container and send it off into Kubernetes or some\nother Docker orchestrator.\n\nHowever, I also see this registry as an opportunity to save time in my\npipeline (and save round trips to Docker Hub and back every time). For builds that require\nsome of these extra dependencies, I like to build a \"build\" Docker image.\nThat way, I have an image with all of those dependencies baked right in. Then, as part of my\npipeline, I can build the image at the start (either only when changes are made, or every time),\nand the rest of the pipeline can consume that image as the base image.\n\n## Putting it in practice\nFor example, let's see what it looks like to build a simple Docker image to use when\ndeploying to [Google Firebase](https://firebase.google.com/).\n\nFirebase is a \"backend as a service\" tool that provides a database, authentication,\nand other services across platforms (web, iOS, and Android). It also includes web hosting\nand several other items that can be deployed through [a CLI](https://firebase.google.com/docs/cli).\nThis tool makes getting started really easy. You can deploy the whole stack with\n`firebase deploy`. Alternatively, you can deploy a part (like [serverless](/topics/serverless/) functions)\nwith a command like `firebase deploy --only functions`.\n\nMaking this work in a CI/CD world requires a few extra steps though. We'll need a Node\nDocker image that has the Firebase CLI in it, so let's make a simple Dockerfile to do that.\n\n> Putting this Dockerfile in `.meta/Dockerfile`\n\n```dockerfile\nFROM node:10\n\nRUN npm install -g firebase-tools\n```\n\nNext, I'll add a job to the front of my pipeline.\n\n> Added to the front of my `.gitlab-ci.yml`\n\n```yaml\nmeta-build-image:\n  image: docker:stable\n  services:\n    - docker:dind\n  stage: prepare\n  script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - cd .meta\n    - docker build -t $CI_REGISTRY/group/project/buildimage:latest .\n    - docker push $CI_REGISTRY/group/project/buildimage:latest\n  only:\n    refs:\n      - main\n    changes:\n      - .meta/Dockerfile\n```\n\nLet's break down that job:\n1. We use the `docker:stable` image and a service of `docker:dind`.\n1. The stage is my first stage, called `prepare`.\n1. In the script, we log in to the GitLab registry with the built-in variables and build the\nimage. For more details see the [GitLab documentation for building Docker images](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html).\n1. We only run this on `main` and only when the `.meta/Dockerfile` changes. This makes\nsure we are specific about when we change the Docker image. We could also use the\ncommit hash or other methods here to make the image more traceable, as sketched after this list.
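\n\nFor the commit-hash variant, the job might look something like this (a sketch; it leans on the predefined `$CI_REGISTRY_IMAGE` and `$CI_COMMIT_SHORT_SHA` variables, and `buildimage` is just the example name from above):\n\n```yaml\nmeta-build-image:\n  image: docker:stable\n  services:\n    - docker:dind\n  stage: prepare\n  script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    # Tag with the short commit SHA so each pipeline can pin the exact build image it was tested with\n    - docker build -t $CI_REGISTRY_IMAGE/buildimage:$CI_COMMIT_SHORT_SHA .meta\n    - docker push $CI_REGISTRY_IMAGE/buildimage:$CI_COMMIT_SHORT_SHA\n```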
\n\nNow, in further jobs down the pipeline, I can use the latest build of the Docker image like this:\n\n```yaml\nfirestore:\n  image: registry.gitlab.com/group/project/buildimage\n  stage: deploy 🚢🇮🇹\n  script:\n    - firebase deploy --only firestore\n  only:\n    changes:\n      - .firebase-config/firestore.rules\n      - .firebase-config/firestore.indexes.json\n```\n\nThis job only runs if something about\nthe [Firestore](https://firebase.google.com/docs/firestore) (the database from Firebase)\nconfiguration changes. And when it does, we run the `firebase deploy --only firestore` command in CI. I\nalso added a token for the deploy as a [GitLab CI/CD variable](https://docs.gitlab.com/ee/ci/variables/),\nbased on the Firebase documentation\nfor [using Firebase with CI](https://firebase.google.com/docs/cli#admin-commands).\n\n## Summary\nIn the end, this helps speed up pipelines by ensuring that you have a custom-built build\nimage that you control. You don't have to rely on unstable or unmaintained Docker Hub\nimages or even have a Docker Hub account yourself to get started.\n\nTo learn more about GitLab CI/CD you can [read the GitLab website](/solutions/continuous-integration/)\nor the [CI/CD docs](https://docs.gitlab.com/ee/ci/introduction/). Also, there's a lot more to\nlearn about the [GitLab Docker registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\n\nCover image by [Hack Capital](https://unsplash.com/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/build?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[110,1002,726],{"slug":5991,"featured":6,"template":678},"building-build-images","content:en-us:blog:building-build-images.yml","Building Build Images","en-us/blog/building-build-images.yml","en-us/blog/building-build-images",{"_path":5997,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5998,"content":6003,"config":6008,"_id":6010,"_type":16,"title":6011,"_source":17,"_file":6012,"_stem":6013,"_extension":20},"/en-us/blog/tyranny-of-the-clock",{"title":5999,"description":6000,"ogTitle":5999,"ogDescription":6000,"noIndex":6,"ogImage":2165,"ogUrl":6001,"ogSiteName":692,"ogType":693,"canonicalUrls":6001,"schema":6002},"6 Lessons we learned when debugging a scaling problem on GitLab.com","Get a closer look at how we investigated errors originating from scheduled jobs, and how we stumbled upon \"the tyranny of the clock.\"","https://about.gitlab.com/blog/tyranny-of-the-clock","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"6 Lessons we learned when debugging a scaling problem on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Craig Miskell\"}],\n        \"datePublished\": \"2019-08-27\",\n      }",{"title":5999,"description":6000,"authors":6004,"heroImage":2165,"date":6005,"body":6006,"category":14,"tags":6007},[1463],"2019-08-27","\nHere is a story of a scaling problem on GitLab.com: How we found it, wrestled with it, and ultimately resolved it. 
And how we discovered the tyranny of the clock.\n\n## The problem\n\nWe started receiving reports from customers that they were intermittently seeing errors on Git pulls from GitLab.com, typically from CI jobs or similar automated systems. The reported error message was usually:\n```\nssh_exchange_identification: connection closed by remote host\nfatal: Could not read from remote repository\n```\nTo make things more difficult, the error message was intermittent and apparently unpredictable. We weren't able to reproduce it on demand, nor identify any clear indication of what was happening in graphs or logs. The error message wasn't particularly helpful either; the SSH client was being told the connection had gone away, but that could be due to anything: a flaky client or VM, a firewall we don't control, an ISP doing something strange, or an application problem at our end. We deal with a *lot* of connections to Git-over-SSH, on the order of ~26 million a day, or 300/s on average, so trying to pick a small number of failing connections out of that firehose of data was going to be difficult. It's a good thing we like a challenge.\n\n## The first clue\n\nWe got in touch with one of our customers (thanks Hubert Hölzl from Atalanda) who was seeing the problem several times a day, which gave us a foothold. Hubert was able to supply the relevant public IP address, which meant we could run some packet captures on our frontend HAProxy nodes to attempt to isolate the problem from a smaller data set than 'All of the SSH traffic.' Even better, they were using the [alternate-ssh port](/blog/gitlab-dot-com-now-supports-an-alternate-git-plus-ssh-port/), which meant we only had two HAProxy servers to look at, not 16.\n\nTrawling through these packet traces was still not fun; despite the constraints, there was ~500MB of packet capture from about 6.5 hours. We found the short-running connections, in which the TCP connection was established, the client sent a version string identifier, and then our HAProxy immediately tore down the connection with a proper TCP FIN sequence. This was the first great clue. It told us that it was definitely the GitLab.com end that was closing the connection, not something in between the client and us, meaning this was a problem we could debug.\n\n### Lesson #1: In Wireshark, the Statistics menu has a wealth of useful tools that I'd never really noticed until this endeavor.\n\nIn particular, 'Conversations' shows you a basic breakdown of time, packets, and bytes for each TCP connection in the capture, which you can sort. I *should* have used this at the start, instead of trawling through the captures manually. In hindsight, connections with small packet counts were what I was looking for, and the Conversations view shows this easily. I was then able to use this feature to find other instances, and verify that the first instance I found was not just an unusual outlier.\n\n## Diving into logs\n\nSo what was causing HAProxy to tear down the connection to the client? It certainly seemed unlikely that it was doing so arbitrarily; there had to be a deeper reason, another layer of [turtles](https://en.wikipedia.org/wiki/Turtles_all_the_way_down), if you will. The HAProxy logs seemed like the next place to check. Ours are stored/available in GCP BigQuery, which is handy because there's a lot of them, and we needed to slice 'n dice them in lots of different ways. 
But first, we were able to identify the log entry for one of the incidents from the packet capture, based on time and TCP ports, which was a major breakthrough. The most interesting detail in that entry was the `t_state` (Termination State) attribute, which was `SD`. From the HAProxy documentation:\n```\n    S: aborted by the server, or the server explicitly refused it\n    D: the session was in the DATA phase.\n```\n`D` is pretty clear; the TCP connection had been properly established, and data was being sent, which matched the packet capture evidence. The `S` means HAProxy received an RST or an ICMP failure message from the backend. There was no immediate clue as to which case was occurring, or as to possible causes. It could be anything from a networking issue (e.g. glitch or congestion) to an application-level problem. Using BigQuery to aggregate by the Git backends, it was clear the problem wasn't specific to any one VM. We needed more information.\n\nSide note: It turned out that logs with `SD` weren't unique to the problem we were seeing. On the alternate-ssh port we get a lot of scanning for HTTPS, which leads to `SD` being logged when the SSH server sees a TLS ClientHello message while expecting an SSH greeting. This created a brief detour in our investigation.\n\nAfter capturing some traffic between HAProxy and the Git server and using the Wireshark statistics tools again, it quickly became obvious that SSHD on the Git server was tearing down the connection with a TCP FIN-ACK immediately after the TCP three-way handshake; HAProxy still hadn't sent the first data packet but was about to, and when it did very shortly after, the Git server responded with a TCP RST. And thus we had the reason for HAProxy to log a connection failure with `SD`: SSH was closing the connection, apparently deliberately and cleanly, with the RST being just an artifact of the SSH server receiving a packet after the FIN-ACK; it doesn't mean anything else here.\n\n## An illuminating graph\n\nWhile watching and analyzing the `SD` logs in BigQuery, it became apparent that there was quite a bit of clustering going on in the time dimension, with spikes in the first 10 seconds after the top of each minute, peaking at about 5-6 seconds past:\n\n![Connection errors grouped by second](https://gitlab.com/gitlab-com/gl-infra/infrastructure/uploads/72cd1b763c51781fa4224495f059afb5/image.png){: .shadow.medium.center}\nConnection errors, grouped by second-of-the-minute\n{: .note.text-center}\n\nThis graph is created from data collated over a number of hours, so the fact that the pattern is so substantial suggests the cause is consistent across minutes and hours, and possibly even worse at specific times of the day. Even more interesting, the average spike is 3x the base load, which means we have a fun scaling problem: simply provisioning 'more resources' in terms of VMs to meet the peak loads would potentially be prohibitively expensive. This also suggested that we were hitting some hard limit, and was our first clue to an underlying systemic problem, which I have called \"the tyranny of the clock.\"\n\nCron, or similar scheduling systems, often don't have sub-minute accuracy, and when they do, it isn't used very often because humans prefer to think about things in round numbers. Consequently, jobs will run at the start of the minute or hour, or at other nice round numbers. 
If they take a couple of seconds to do any preparations before they do a `git fetch` from GitLab.com, this would explain the connection pattern with increases a few seconds into the minute, and thus the increase in errors around those times.\n\n### Lesson #2: Apparently a lot of people have time synchronization (via NTP or otherwise) set up properly.\n\nIf they hadn't, this problem wouldn't have emerged so clearly. Yay for NTP!\n\nSo what could be causing SSH to drop the connection?\n\n## Getting close\n\nLooking through the documentation for SSHD, we found MaxStartups, which controls the maximum number of connections that can be in the pre-authenticated state. At the top of the minute, under the stampeding herd of scheduled jobs from around the internet, it seems plausible that we were exceeding this limit. MaxStartups actually has three numbers: the low watermark (the number at which it starts dropping connections), a percentage of connections to (randomly) drop for any connections above the low watermark, and an absolute maximum above which all new connections are dropped. The default is 10:30:100, and our setting at this time was 100:30:200, so clearly we had increased the limits in the past. Perhaps it was time to increase them again.\n\nSomewhat annoyingly, the version of OpenSSH on our servers is 7.2, and the only way to see that MaxStartups is being breached in that version is to turn on Debug level logging. This is an absolute firehose of data, so we carefully turned it on for a short period on only one server. Thankfully, within a couple of minutes it was obvious that MaxStartups was being breached, and connections were being dropped early as a result.\n\nIt turns out that OpenSSH 7.6 (the version that comes with Ubuntu 18.04) has better logging about MaxStartups; it only requires Verbose logging to get it. While not ideal, it's better than Debug level.\n\n### Lesson #3: It is polite to log interesting information at default levels, and deliberately dropping a connection for any reason is definitely interesting to system administrators.\n\nSo now that we have a cause for the problem, how can we address it? We can bump MaxStartups, but what will that cost? Definitely a small bit of memory, but would it cause any untoward downstream effects? We could only speculate, so we had to just try it. We bumped the value to 150:30:300 (a 50% increase). This had a great positive effect, and no visible negative effect (such as increased CPU load):\n\n![Before and after graph](https://gitlab.com/gitlab-com/gl-infra/production/uploads/047a4859caafc6681c9d034c202418b9/image.png){: .shadow.medium.center}\n\nBefore and after bumping MaxStartups by 50%\n{: .note.text-center}\n\nNote the substantial reduction after 01:15. We've clearly eliminated a large proportion of the errors, although a non-trivial amount remained. Interestingly, these are clustered around round numbers: the top of the hour, every 30 minutes, 15 minutes, and 10 minutes. Clearly the tyranny of the clock continues. The top of the hour saw the biggest peaks, which seems reasonable in hindsight; a lot of people will simply schedule their jobs to run every hour at 0 minutes past the hour. This finding was more evidence confirming our theory that scheduled jobs were causing the spikes, and that we were on the right path with this error being due to a numerical limit.\n\nDelightfully, there were no obvious negative effects: CPU usage on the SSH servers stayed about the same and load didn't noticeably increase, even though we were unleashing more connections that would previously have been dropped, and doing so at the busiest times. This was promising.
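\n\nFor reference, the lever we were pulling lives in the SSH daemon's config; a sketch with the values from our change (the fields, per sshd_config(5), are start:rate:full):\n\n```\n# /etc/ssh/sshd_config\n# Randomly drop 30% of new unauthenticated connections once 150 are\n# pending, and drop all of them once 300 are pending\nMaxStartups 150:30:300\n```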
\n\n## Rate limiting\n\nAt this point we weren't keen on simply bumping MaxStartups higher; while our 50% increase to date had worked, it felt pretty crude to keep pushing this arbitrarily higher. Surely there was something else we could do.\n\nMy search took me to the HAProxy layer that we have in front of the SSH servers. HAProxy has a nice 'rate-limit sessions' option for its frontend listeners. When configured, it constrains the new TCP connections per second that the frontend will pass through to backends, leaving additional incoming connections on the TCP socket. If the incoming rate exceeds the limit (measured every millisecond), the new connections are simply delayed. The TCP client (SSH in this case) simply sees a delay before the TCP connection is established, which is delightfully graceful, in my opinion. As long as the overall rate never spiked too far above the limit for too long, we'd be fine.\n\nThe next question was what number we should use. This is complicated by the fact that we have 27 SSH backends and 18 HAProxy frontends (16 main, two alt-ssh), and the frontends don't coordinate amongst themselves for this rate limiting. We also had to take into account how long it takes a new SSH session to make it past authentication: assuming a MaxStartups of 150, if the auth phase took two seconds we could only send 75 new sessions per second to each backend. The [note on the issue](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/7168#note_191678023) has the derivation of the math, and I won't recount it in detail here, except to note that there are four quantities needed to calculate the rate limit: the counts of both server types, the value of MaxStartups, and `T`, which is how long the SSH session takes to auth. `T` is critical, but we could only estimate it. You might speculate how well I did at this estimate, but that would spoil the story. I went with two seconds for now, came to a rate limit per frontend of approximately 112.5, and rounded down to 110.
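\n\nAs a back-of-the-envelope restatement of that calculation (my own numbers and estimates, not production code; the issue note has the full derivation):\n\n```ruby\nbackends     = 27     # SSH (Git) servers\nfrontends    = 18     # HAProxy nodes: 16 main, 2 alt-ssh\nmax_startups = 150.0  # pre-auth connections a backend accepts before dropping\nt_auth       = 2.0    # estimated seconds an SSH session spends authenticating\n\nper_backend  = max_startups / t_auth              # ~75 new sessions/s per backend\nper_frontend = per_backend * backends / frontends\n\nputs per_frontend # => 112.5, which we rounded down to 110\n```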
\n\nWe deployed. Everything was happy, yes? Errors tended to zero, and children danced happily in the streets? Well, not so much. This change had no visible effect on the error rates. I will be honest here, and say I was rather distressed. We had missed something important, or misunderstood the problem space entirely.\n\nSo we went back to the logs (and eventually the HAProxy metrics), and were able to verify that the rate limiting was at least working to limit connections to the number we specified, and that historically this number had been higher, so we were successfully constraining the rate at which connections were being dispatched. But clearly the rate was still too high, and not only that, it wasn't even *close* enough to the right number to have a measurable impact. Looking at the selection of backends (as logged by HAProxy) showed an oddity: at the top of the hour, the backend connections were not evenly distributed across all the SSH servers. In the sample time chosen, it varied from 30 to 121 in a given second, meaning our load balancing wasn't very balanced. Reviewing the configuration showed we were using `balance source`, so that a given client IP address would always connect to the same backend. This might be good if you needed session stickiness, but this is SSH and we have no such need. It was deliberately chosen some time ago, but there was no record as to why. We couldn't come up with a good reason to keep it, so we tried changing to leastconn, which distributes new incoming connections to the backend with the least number of current connections. This was the result, in terms of CPU usage on our SSH (Git) fleet:\n\n![Leastconn before and after](https://gitlab.com/gitlab-com/gl-infra/infrastructure/uploads/b006877c1e45ad0255a316a96750402c/before-after-leastconn-change.png){: .shadow.medium.center}\n\nBefore and after turning on leastconn\n{: .note.text-center}\n\nClearly leastconn was a good idea. The two low-usage lines are our [Canary](/handbook/engineering/infrastructure/library/canary/) servers and can be ignored, but the spread on the others before the change was 2:1 (30% to 60%), so clearly some of our backends were much busier than others due to the source IP hashing. This was surprising to me; it seemed reasonable to expect the range of client IPs to be sufficient to spread the load much more evenly, but apparently a few large outliers were enough to skew the usage significantly.\n\n### Lesson #4: When you choose specific non-default settings, leave a comment or link to documentation/issues as to why; future people will thank you.\n\nThis transparency is [one of GitLab's core values](https://handbook.gitlab.com/handbook/values/#say-why-not-just-what).\n\nTurning on leastconn also helped reduce the error rates, so it is something we wanted to continue with. In the spirit of experimenting, we dropped the rate limit to 100, which further reduced the error rate, suggesting that perhaps the initial estimate for `T` was wrong. But if so, it was too small, leading to the rate limit being too high, and even 100/s felt pretty low; we weren't keen to drop it further. Unfortunately, for some operational reasons these two changes were just an experiment, and we had to roll back to `balance source` and a rate limit of 100.
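\n\nFor the curious, the two levers discussed here look roughly like this in an HAProxy configuration (a simplified sketch, not our production config; the server names and addresses are invented):\n\n```\nfrontend ssh_in\n    mode tcp\n    bind :22\n    # cap new TCP sessions per second passed through to the backends\n    rate-limit sessions 100\n    default_backend ssh_servers\n\nbackend ssh_servers\n    mode tcp\n    # was 'balance source': sticky by client IP hash\n    balance leastconn\n    server git-01 10.0.0.1:22 check\n    server git-02 10.0.0.2:22 check\n```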
\n\nWith the rate limit as low as we were comfortable with, and leastconn insufficient, we tried increasing MaxStartups instead: first to 200 with some effect, then to 250. Lo, the errors all but disappeared, and nothing bad happened.\n\n### Lesson #5: As scary as it looks, MaxStartups appears to have very little performance impact even if it's raised much higher than the default.\n\nThis is probably a large and powerful lever we can pull in future, if necessary. It's possible we might notice problems if it gets into the thousands or tens of thousands, but we're a long way from that.\n\nWhat does this say about my estimate for `T`, the time to establish and authenticate an SSH session? Reverse engineering the equation, knowing that 200 wasn't quite enough for MaxStartups and 250 is enough, we could calculate that `T` is probably between 2.7 and 3.4 seconds. So the estimate of two seconds wasn't far off, but the actual value was definitely higher than expected. We'll come back to this a bit later.\n\n## Final steps\n\nLooking at the logs again in hindsight, and after some contemplation, we discovered that we could identify this specific failure by a t_state of `SD` combined with a b_read (bytes read by client) of 0. As noted above, we handle approximately 26-28 million SSH connections per day. It was unpleasant to discover that at the worst of the problem, roughly 1.5% of those connections were being dropped badly. Clearly the problem was bigger than we had realized at the start. There was nothing about this that we couldn't have identified earlier (right back when we discovered that t_state=\"SD\" was indicative of the issue), but we didn't think to do so, and we should have. It might have increased how much effort we put in.\n\n### Lesson #6: Measure the actual rate of your errors as early as possible.\n\nWe might have put a higher priority on this earlier had we realized the extent of the problem, although it was still dependent on knowing the identifying characteristic.\n\nOn the plus side, after our bumps to MaxStartups and the rate limiting, the error rate was down to 0.001%, or a few hundred per day. This was better, but still higher than we liked. After we unblocked some other operational matters, we were able to formally deploy the leastconn change, and the errors were eliminated entirely. We could breathe easy again.\n\n## Further work\n\nClearly the SSH authentication phase is still taking quite a while, perhaps up to 3.4 seconds. GitLab can use [AuthorizedKeysCommand](https://docs.gitlab.com/ee/administration/operations/fast_ssh_key_lookup.html) to look up the SSH key directly in the database. This is critical for speedy operations when you have a large number of users; otherwise SSHD has to sequentially read a very large `authorized_keys` file to look up the public key of the user, and this doesn't scale well. We implement the lookup with a little bit of Ruby that calls an internal HTTP API. [Stan Hu](/company/team/#stanhu), engineering fellow and our resident source of GitLab knowledge, identified that the Unicorn instances on the Git/SSH servers were experiencing substantial queuing. This could be a significant contributor to the ~3-second pre-authentication stage, and is therefore something we need to look at further, so investigations continue. We may increase the number of Unicorn (or Puma) workers on these nodes, so there's always a worker available for SSH. However, that isn't without risk, so we will need to be careful and measure well. Work continues, but slower now that the core user problem has been mitigated. We may eventually be able to reduce MaxStartups, although given the lack of negative impact it seems to have, there's little need. It would make everyone more comfortable if OpenSSH let us see how close we were to hitting MaxStartups at any point, rather than having to go in blind and only find out when the limit is breached and connections are dropped.\n\nWe also need to alert when we see HAProxy logs that indicate the problem is occurring, because in practice there's no reason it should ever happen. If it does, we need to increase MaxStartups further, or if resources are constrained, add more Git/SSH nodes.\n\n## Conclusion\n\nComplex systems have complex interactions, and there is often more than one lever that can be used to control various bottlenecks. It's good to know what tools are available, because they often have trade-offs. Assumptions and estimates can also be risky: in hindsight, I would have attempted to get a much better measurement of how long authentication takes, so that my `T` estimate was better.\n\nBut the biggest lesson is that when large numbers of people schedule jobs at round numbers on the clock, it leads to really interesting scaling problems for centralized service providers like GitLab. If you're one of them, you might like to consider putting in a random sleep of maybe 30 seconds at the start, or picking a random time during the hour *and* putting in the random sleep, just to be polite and fight the tyranny of the clock.
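\n\nIf your scheduler is cron, that politeness can be a one-line change (a sketch: it assumes bash is available for `$RANDOM`, and note that cron needs a literal `%` escaped):\n\n```\nSHELL=/bin/bash\n# Run at 17 past the hour rather than :00, plus 0-29 seconds of jitter\n17 * * * * sleep $((RANDOM \\% 30)) && git fetch origin\n```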
\n\nCover image by [Jon Tyson](https://unsplash.com/@jontyson) on [Unsplash](https://unsplash.com)\n{: .note}\n",[702,704,1286],{"slug":6009,"featured":6,"template":678},"tyranny-of-the-clock","content:en-us:blog:tyranny-of-the-clock.yml","Tyranny Of The Clock","en-us/blog/tyranny-of-the-clock.yml","en-us/blog/tyranny-of-the-clock",{"_path":6015,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6016,"content":6022,"config":6029,"_id":6031,"_type":16,"title":6032,"_source":17,"_file":6033,"_stem":6034,"_extension":20},"/en-us/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition",{"title":6017,"description":6018,"ogTitle":6017,"ogDescription":6018,"noIndex":6,"ogImage":6019,"ogUrl":6020,"ogSiteName":692,"ogType":693,"canonicalUrls":6020,"schema":6021},"Why GitLab uses one codebase for Community & Enterprise","Dive into our decision to switch GitLab over to a single codebase as we review some of the benefits and challenges. Learn more here!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671631/Blog/Hero%20Images/merge-ce-ee-codebases.jpg","https://about.gitlab.com/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update: Why GitLab uses a single codebase for Community and Enterprise editions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yorick Peterse\"}],\n        \"datePublished\": \"2019-08-23\",\n      }",{"title":6023,"description":6018,"authors":6024,"heroImage":6019,"date":6026,"body":6027,"category":14,"tags":6028},"Update: Why GitLab uses a single codebase for Community and Enterprise editions",[6025],"Yorick Peterse","2019-08-23","\n\nIn [\"GitLab might move to a single Rails\ncodebase\"](/blog/merging-ce-and-ee-codebases/), we announced that GitLab\nmight move to using a single codebase for GitLab Community Edition (CE) and\nGitLab Enterprise Edition (EE). Since then we have decided to continue moving\ntoward a single codebase. In this article, I highlight some of the challenges,\nthe required work, and the steps remaining to complete the switch.\n\n## What is a codebase?\n\nWhat is a codebase, I hear you ask? Well, a codebase (which is at times spelled as code base) is essentially the entire collection of source\ncode that is required for a program or application to function properly. This can include things like configuration\nfiles, libraries, and other dependencies, in addition to the actual application code. The codebase is\ntypically stored in a single location, often within a source control repository, where multiple developers\ncan access and make contributions to it.\n\nKeeping the codebase in a single repository also helps with backing up and versioning overlapping code\nchanges. This can be especially important for larger projects that require a lot of coordination\nand communication between team members. 
With everyone working from the same codebase, it becomes easier\nto ensure that changes are made consistently and in a way that does not break the application.\n\n## Why GitLab uses a single codebase\n\nPrior to using a single codebase, CE and EE used two different repositories for the Rails application for years.\nBy using separate repositories we could separate proprietary code from code that\nis free software. On the surface this seems like a good idea for various\nreasons (e.g., licensing), but over the years the drawbacks\nbegan to outweigh the benefits.\n\nWe [mention some of these drawbacks in a previous\narticle](/blog/merging-ce-and-ee-codebases/), but more or less they all\ncome down to the same core problem: it made the development process more complex\nthan necessary. For example, we ended up with around 150 merge requests spread\nacross CE and EE for a security release from several months ago. While the\nprocess of merging these merge requests is automated, we ran into a variety of\nissues (e.g., failing tests) that required manual intervention. We could have\nreduced the number of merge requests by half if we used a single repository,\ncreating less work for developers and release managers.\n\nToward the end of 2018, I felt that we were running out of time and had to do\nsomething about the separation of CE and EE. We had always tried to avoid\nmerging the two repositories due to the complexity and time involved, but it\nstarted to become more and more clear we had no other option. [Marin\nJankovski](/company/team/#maxlazio), Delivery engineering manager, and I made a\nplan to merge the two repositories. Marin wrote a [design\ndocument](/handbook/engineering/infrastructure/library/merge-ce-ee-codebases/)\nthat outlined the details of it all. The design document laid out the challenges\nwe faced, and it gathered the critical support required for one of the largest engineering\nprojects at GitLab to date.\n\n## What is the difference between a codebase and a repository?\n\nThe basic difference between a codebase and a repository is that the codebase is the code itself, while the repository is the place where that code is kept and versioned.\n\nBut more specifically...\n\nA codebase is the (public or private) collection of code that is actively being iterated on, and it is typically stored in a source control repository managed by a version control system.\n\nA source code repository is where the code being worked on, along with its history, is kept. It's also a place to house documentation, notes, web pages, and other project items.\n\n## Working toward a single codebase\n\nMoving to a single codebase is not something we can do overnight for a project\nthe size of GitLab. Workflows must be adapted, developers need to adjust to the\nnew setup, and automation requires extensive changes.\n\nOne of the biggest challenges from an engineering perspective was to come up\nwith a way to transparently remove proprietary code from GitLab when building a\nCE release. A naive approach might involve a script that removes known bits of\nproprietary code. While this might work for small projects that don't change\noften, it was not going to work for a project the size of GitLab.\n\nRuby provides us with a solution to this problem. In Ruby, you can create a\nmodule and inject it into another module or class. 
Once injected, the\nfunctionality of the module becomes available to the target module or class.\nThis is best illustrated with a simple example:\n\n```ruby\nclass Person\n  def initialize(name)\n    @name = name\n  end\n\n  def name\n    @name\n  end\nend\n\nPerson.include(Greet)\n\nmodule Greet\n  def greet\n    \"Hello #{name}\"\n  end\nend\n\nalice = Person.new('Alice')\n\nalice.greet # => \"Hello Alice\"\n```\n\nHere we define a class `Person`, followed by a module that is used to create a\nmessage greeting a person. Next, we include it into the `Person` class, at which\npoint we can use the module's methods on instances of the `Person` class. The\nresult is the message \"Hello Alice.\"\n\nWhile this example is not exciting, using a setup like this allows us to\nmove proprietary code into separate modules, and inject these modules when GitLab\nEE is used. For GitLab CE, we would remove these modules, and the code injecting\nthem would have to disable itself transparently and automatically.
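\n\nTo make that idea concrete, here is a simplified sketch of the pattern (my own illustration, not GitLab's actual code; the class, module, and method names are invented). An EE module overrides a CE method via `prepend` and falls back to the CE behavior with `super`, and the injection is skipped entirely when the `ee` directory is absent:\n\n```ruby\nclass GitAccess\n  # The CE rules, simplified to a stub for this sketch\n  def lfs_upload_access?\n    true\n  end\nend\n\nmodule EE\n  module GitAccess\n    # EE adds its own check, then defers to the CE implementation\n    def lfs_upload_access?\n      return false if above_size_limit?\n\n      super\n    end\n\n    def above_size_limit?\n      false # stubbed for the sketch\n    end\n  end\nend\n\n# When a CE release is built, the ee/ directory is gone, so this line\n# does nothing and GitAccess transparently keeps its CE-only behavior.\nGitAccess.prepend(EE::GitAccess) if Dir.exist?('ee')\n\nputs GitAccess.new.lfs_upload_access?\n```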
\n\nGitLab EE had been using this setup since late 2016, with EE modules residing\nin a separate \"ee\" directory, but only in a limited number of places. This meant that\nin some places EE and CE code got mixed together, while in other places the two\nwere separate. For example, we had code like this:\n\n```diff\n def lfs_upload_access?\n   return false unless project.lfs_enabled?\n   return false unless has_authentication_ability?(:push_code)\n+  return false if project.above_size_limit? || objects_exceed_repo_limit?\n\n   lfs_deploy_token? || can?(user, :push_code, project)\n end\n```\n\nHere EE added a line into an existing method without using a separate module,\nmaking it difficult to remove the EE-specific code when building CE.\n\nBefore we could move to a single codebase, we had to separate EE-specific code from code shared between CE and EE. Due to the amount\nof work necessary, we divided the work between two departments: backend and\nfrontend. For every department we created issues outlining the work to do for\nthe various parts of the codebase. We even included the [exact lines of code\nthat had to change directly in the created\nissues](https://gitlab.com/gitlab-org/gitlab-ee/issues/9506), making it simple\nto see what one had to do. Each department also had an engineer assigned as the\nlead engineer, responsible for taking on the most difficult challenges. [Filipa\nLacerda](/company/team/#FilipaLacerda), senior frontend engineer of Verify (CI)\nand Delivery, was in charge of frontend code. [As the Delivery backend engineer,\nI myself](/company/team/#yorickpeterse) was in charge of backend code.\n\nSome changes were small and took a short amount of time, while others were big\nand took weeks. One of my big challenges was to make sure CE and EE [use the same\ndatabase schema](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/26940),\nchanging just under 24,000 lines of code over a two-month period.\n\n>In total the work involved 55\ndifferent engineers submitting more than 600 merge requests, closing just under\n400 issues, and changing nearly 1.5 million lines of code\n\nFilipa spent a lot of time creating 168 frontend issues outlining specific tasks\nas well as submitting 124 merge requests to address the majority of these\nissues. Resolving some of these issues required getting rid of some\ntechnical debt first, such as [breaking up large chunks of code into smaller\nchunks](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/14592), and\ncoming up with a way [to create EE-specific Vue.js\ntemplates](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/25650).\n\nWhile Filipa and I took on the biggest challenges, in total the work involved 55\ndifferent engineers submitting more than 600 merge requests, closing just under\n400 issues, and changing nearly 1.5 million lines of code.\n\n## Moving toward a single codebase\n\nWith most of the work done, we could start looking into what project setup we\nwould use for a single codebase. We came up with three different approaches:\n\n### 1. Single codebase: moving all development into gitlab-ce\n\nAll code and development is moved into the gitlab-ce repository. The gitlab-ee\nrepository is archived, and a separate repository is set up as a mirror of\ngitlab-ce, called gitlab-foss. Proprietary code is removed from this mirror\nautomatically.\n\nSince most of GitLab's development takes place in the current gitlab-ce\nrepository, this setup would reduce the number of issues to move as well as the number of merge requests to close. A downside of this approach is that clones of\nthe gitlab-ce repository would include proprietary code.\n\n### 2. Single codebase: moving all development into gitlab-ee\n\nAll code and development is moved into the gitlab-ee repository. The gitlab-ce\nrepository remains as is in terms of code, and becomes a mirror of gitlab-ee. Like\nthe first option, proprietary code is removed from this mirror automatically.\n\nThis setup means that users cloning gitlab-ce don't end up with proprietary code\nin their copy of gitlab-ce.\n\n### 3. Single codebase: moving all development into a new repository\n\nWe set up an entirely new repository called \"gitlab,\" and move all code and\ndevelopment into this repository. The gitlab-ce and gitlab-ee repositories\nbecome read-only. A mirror is set up (called \"gitlab-foss\") that mirrors the new\n\"gitlab\" repository, without including proprietary code.\n\n## Deciding which single codebase approach to take\n\n[Having evaluated all the benefits and\ndrawbacks](https://www.youtube.com/watch?v=LV_AHeL5sIo), we decided to go with\noption two: move development into gitlab-ee. This approach has several benefits:\n\n1. The code of the gitlab-ce repository remains as is, and won't include any\n   proprietary code.\n1. We do not need a separate mirror repository that does not include proprietary\n   code. Instead, we rename the gitlab-ce repository to \"gitlab-foss.\" We are\n   renaming the repository since having \"gitlab\" and \"gitlab-ce\" as project\n   names could be confusing.\n1. Users building CE from source don't end up with proprietary code in their\n   copy of the gitlab-ce repository.\n1. We keep the Git logs of both gitlab-ce and gitlab-ee, instead of losing the\n   logs (this depends a bit on how we'd move repositories around).\n1. It requires the least amount of changes to our workflow and tooling.\n1. Using a single project and issue tracker for both CE and EE makes it easier\n   to search for issues.\n\nIssues created in the gitlab-ce project will move to the gitlab-ee project,\nwhich we will rename to just \"gitlab\" (or \"gitlab-org/gitlab\" if you include the\ngroup name). 
This project then becomes the single source of truth, and is used\nfor creating issues for both the CE and EE distributions.\n\nMoving merge requests across projects is not possible, so we will close any open\nmerge requests. Authors of these merge requests will have to resubmit them to\nthe \"gitlab\" (called \"gitlab-ee\" before the rename) project.\n\nWhen moving issues or closing merge requests, a bot will also post a comment\nexplaining why this is done, what steps the author of a merge request has to\ntake, and where one might find more information about these procedures.\n\nPrior to the single codebase setup, GitLab community contributions would be submitted\nto the gitlab-ce repository. In the single codebase, contributions are instead\nsubmitted to the new gitlab repository (\"gitlab-org/gitlab\"). EE-specific code\nresides in an \"ee\" directory in the repository. Code outside of this directory\nwill be free and open source software, using the same license as the gitlab-ce\nrepository currently uses. This means that as long as you do not change anything\nin this \"ee\" directory, the only change for GitLab community contributions is the use\nof a different repository.\n\nOur current plan is to have a single codebase by the first week of September. GitLab 12.3 will be the first release based on a single codebase.\n\nUsers who clone GitLab EE and/or GitLab CE from source should update their Git\nremote URLs after the projects are renamed. This is not strictly necessary, as\nGitLab will redirect Git operations to the new repository. For users of our\nOmnibus packages and Docker images nothing changes.\n\nThose interested in learning more about what went on behind the scenes can refer\nto the following resources:\n\n* [A video in which we discuss the benefits and drawbacks of the various\n  project setups](https://www.youtube.com/watch?v=LV_AHeL5sIo)\n* [The issue detailing the remaining work to do](https://gitlab.com/gitlab-org/gitlab-ee/issues/13304)\n* [A list of all the single codebase merge requests](https://gitlab.com/groups/gitlab-org/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&label_name%5B%5D=single%20codebase)\n\nCover image from [Unsplash](https://images.unsplash.com/photo-1512217536414-d92543c79ca1)\n{: .note}\n",[915,703],{"slug":6030,"featured":6,"template":678},"a-single-codebase-for-gitlab-community-and-enterprise-edition","content:en-us:blog:a-single-codebase-for-gitlab-community-and-enterprise-edition.yml","A Single Codebase For Gitlab Community And Enterprise Edition","en-us/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition.yml","en-us/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition",{"_path":6036,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6037,"content":6043,"config":6048,"_id":6050,"_type":16,"title":6051,"_source":17,"_file":6052,"_stem":6053,"_extension":20},"/en-us/blog/a-look-ahead-for-gitlab-cicd",{"title":6038,"description":6039,"ogTitle":6038,"ogDescription":6039,"noIndex":6,"ogImage":6040,"ogUrl":6041,"ogSiteName":692,"ogType":693,"canonicalUrls":6041,"schema":6042},"New up and coming GitLab CI/CD Features","DAG, Multi-project Pipelines, Runner Setup for Kubernetes and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666889/Blog/Hero%20Images/photo-cicd12xlookahead.jpg","https://about.gitlab.com/blog/a-look-ahead-for-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New up and coming 
GitLab CI/CD Features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-08-07\",\n      }",{"title":6038,"description":6039,"authors":6044,"heroImage":6040,"date":6045,"body":6046,"category":14,"tags":6047},[4945],"2019-08-07","\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nHey everyone, [Jason Yavorska](https://gitlab.com/jyavorska) here – product manager for CI/CD at GitLab. Back in June we\nreached the mid-point of the year and we're heading into our big 12.0 release, so I took the opportunity to\nsummarize some of the [highlights of our 11.x series of releases](/blog/look-back-on-11-11-cicd/).\nHopefully you had a chance to read it, if not, please take a moment to scan through and I bet you'll find an\ninteresting feature or two that can help improve your pipelines.\n\nWe're a couple of releases into the 12.x cycle now and I couldn't wait to share some\nof the things that we're looking forward to delivering the remainder of this year. Some of the features I am most excited about include DAG, a directed acyclic graph that makes it easy to run pipeline steps out of order, expanding our pipelines for merge requests/results feature to also work with forks, as well as making multi-project pipelines a Core feature. With about 3.44M job instances per week/13.76M per month, GitLab CI is growing at a rapid rate to help our customers and users with their deployment needs. Read on below to learn more about all of the exciting CI/CD features in the 12.0 series of releases that will help you to deploy your code quickly.\n\n## What's recent\n\nIn 12.0, we released [visual reviews](https://docs.gitlab.com/ee/ci/review_apps/index.html#visual-reviews),\nwhich allows users to provide issue feedback directly from the review apps that\nyour pipelines create. This makes it easy for all your team members to provide accurate\nfeedback on the changes you're making. We also added [collapsible job logs](https://docs.gitlab.com/ee/ci/pipelines/index.html#expand-and-collapse-job-log-sections),\nmaking output of pipelines easier to use, and enabled [multiple extends](https://docs.gitlab.com/ee/ci/yaml/#extends)\nfor pipeline jobs to make templatizing behaviors in your configuration even easier.\n\n![Visual Review Apps](https://about.gitlab.com/images/12_0/visual-review-apps.png \"Visual Review Apps\"){: .shadow.medium.center}\n\n[Visual Review Apps](https://docs.gitlab.com/ee/ci/review_apps/index.html#visual-reviews) were released in GitLab 12.0\n{: .note .text-center}\n\nIn 12.1, we delivered [parallel execution for merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html),\nexpanding on our [pipelines for merged results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html)\nto make it very easy to automatically build and test a series of merge requests heading\ninto the same target branch in a fast, safe, and efficient way. 
\n\nIn 12.1, we delivered [parallel execution for merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html), expanding on our [pipelines for merged results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) to make it very easy to automatically build and test a series of merge requests heading into the same target branch in a fast, safe, and efficient way. For GitLab Pages we also added [automatic HTTPS certificate renewal](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/lets_encrypt_integration.html), and completely refactored the GitLab Runner to be [extensible for custom behaviors](http://docs.gitlab.com/runner/executors/custom.html), enabling many new kinds of operation modes for your runners, including but not limited to support for any kind of proprietary virtualization environment.\n\n## What's next\n\nNow that you're up to speed with the first couple of 12.x releases, let's look ahead to what's coming next in each monthly release, from 12.2 this month to 12.6 in December.\n\n## 12.2 (August 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\n12.2 is just around the corner and it's also looking to be a big one.\n\nOne really exciting feature for this release is that we're adding a hybrid directed acyclic graph (DAG) to GitLab CI. This is really just a fancy way of saying you'll be able to run pipeline steps out of order, breaking the stage sequencing you're familiar with in GitLab and allowing jobs to relate to each other directly. This can be valuable for monorepo situations where you have different folders in your repo that can build, test, and maybe even deploy independently, or in general it can provide a nice speed boost for pipeline steps that relate to each other (for example, things like artifact processing or sequential test runs). Read more in our [public issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/47063) about how this great feature is going to work.\n\n![Directed Acyclic Graph](https://about.gitlab.com/images/blogimages/dag_execution.png \"Directed Acyclic Graph\"){: .shadow.medium.center}\n\nOut of order execution using the [Directed Acyclic Graph](https://gitlab.com/gitlab-org/gitlab-ce/issues/47063)\n{: .note .text-center}
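\n\nBased on the proposal in that issue, the configuration could look something like this (a sketch using the proposed `needs` keyword; the details may still change before release):\n\n```yaml\nbuild_a:\n  stage: build\n  script: make build_a\n\ntest_a:\n  stage: test\n  # Start as soon as build_a finishes, without waiting for the rest of the build stage\n  needs: [\"build_a\"]\n  script: make test_a\n```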
One of the greatest\nstrengths of GitLab is the connected ecosystem of features – by tying a release to a milestone, it becomes\npossible to connect all kinds of interesting data in GitLab to the release – issues, merge requests, and more, all\nat your fingertips and curated automatically by GitLab.\n\nWe're also going to be making [runner setup for Kubernetes](https://gitlab.com/gitlab-org/gitlab-ce/issues/63768)\nrequire just a single click to get going, and making a key architectural change to GitLab Pages that will\n[bring the initial availability time for Pages sites down to nearly instantaneous](https://gitlab.com/gitlab-org/gitlab-ce/issues/61929).\n\n## 12.4 (October 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nFirst up, we're planning on adding a [HashiCorp Vault integration](https://gitlab.com/gitlab-org/gitlab-ce/issues/61053) that will let you tie your\nGitLab CI pipelines to your Vault instance, making it possible to keep crucial build and deployment secrets outside\nof GitLab entirely.\n\nWe're also [expanding our pipelines for merge requests/results feature to also work with forks](https://gitlab.com/gitlab-org/gitlab-ee/issues/11934),\nand (building on top of the newly associated milestone) delivering an MVC for fully automated [evidence collection for releases](https://gitlab.com/gitlab-org/gitlab-ce/issues/56030).\nThis means that things like test results, pipeline outputs, merge requests, and issues will have a snapshot\navailable for auditing and review in the context of a release, all collected automatically from throughout GitLab\nwithout having to write a line of code.\n\n## 12.5 (November 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nFor 12.5, we plan to tackle Helm v3 charts by adding features to our container registry to\nmanage them. [Helm v3](https://helm.sh/blog/helm-3-preview-pt1/) changes a lot about how charts work, and\nwe want to ensure that GitLab is there with you as you start to adopt this very different but powerful new way\nof working.\n\nWe also plan to revisit [how workspaces are defined and shared](https://gitlab.com/gitlab-org/gitlab-ce/issues/62802),\nmaking it easier to build up a common staging area that can be shared by different jobs/pipelines in a\nmore natural way than by using the cache or artifacts in GitLab today. Last but not least, we're improving\nour testing parallelization features by making it possible to [leave the parallelization tuning to GitLab itself](https://gitlab.com/gitlab-org/gitlab-ee/issues/12282).\n\n## 12.6 (December 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nFor the holidays we're planning on [making multi-project pipelines a Core feature](https://gitlab.com/gitlab-org/gitlab-ce/issues/63497),\nbringing this powerful capability to all of our users. 
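\n\nIf you haven't used them yet, a multi-project pipeline is triggered from a regular job in your `.gitlab-ci.yml` using the `trigger` keyword available since GitLab 11.8 – a minimal sketch (job name and project path are illustrative):\n\n```\n# Bridge job in the upstream project: starts a pipeline in another project\ndeploy-mobile:\n  stage: deploy\n  trigger: mobile/android   # full path of the downstream project\n```\n\n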
More and more we're hearing that teams are using multi-project\npipelines in all kinds of interesting ways to solve unique problems, and we want to make this feature available to\neveryone who can benefit. EDIT 2020-01-02: Back in 12.4 we resolved [this issue](https://gitlab.com/gitlab-org/gitlab/issues/31573), where the `trigger` keyword was not working in certain cases; this satisfied the request of the folks in that issue to open source the feature. Potential executive dashboards for cross-project pipelines may become paid features in the future, but triggering itself is in Core and working fine. If there are any use cases that are not working for you, please ping me (@jyavorska) in [gitlab#29626](https://gitlab.com/gitlab-org/gitlab/issues/29626) and I'd be happy to take a look.\n\nWe are also bringing in a whole new way of working with GitLab CI/CD: [child/parent pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/22972).\nUsing these you'll be able to trigger downstream pipelines from your main pipeline; they will run completely independently,\nin their own separate namespace, but will report status back to the main pipeline. These\nchild pipelines are definable in YAML files anywhere in your repo, so if you have a monorepo (for example) you'll be able to organize\nthese independent pipelines separately but still orchestrate them from a central command and control module.\n\nFinally, we're looking to improve how we show the [change in pipeline duration over time](https://gitlab.com/gitlab-org/gitlab-ee/issues/1806)\nas well as how [test runs are changing over time](https://gitlab.com/gitlab-org/gitlab-ee/issues/1020). This trend data will make\nit easier to manage the performance of your pipelines on an ongoing basis.\n\n## In conclusion\n\nHopefully you're as excited about these features as we are. We'd love for you to participate\nin the public issues so we can work together to deliver these features with your input. It's\npossible some specific items may change, but overall\nthis is the direction we're headed as we continue to add iterative improvements across all of CI/CD in\nevery release.\n\nInterested in learning more about GitLab CI/CD in general, and seeing all the rest of\nthe items we plan to deliver? 
Visit our [CI/CD strategy page](/direction/ops/)\nfor our themes, priorities, and more details on what's coming next.\n\nPhoto by [Reginar](https://unsplash.com/photos/4fQAMZNaGUo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[894,110,749],{"slug":6049,"featured":6,"template":678},"a-look-ahead-for-gitlab-cicd","content:en-us:blog:a-look-ahead-for-gitlab-cicd.yml","A Look Ahead For Gitlab Cicd","en-us/blog/a-look-ahead-for-gitlab-cicd.yml","en-us/blog/a-look-ahead-for-gitlab-cicd",{"_path":6055,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6056,"content":6062,"config":6067,"_id":6069,"_type":16,"title":6070,"_source":17,"_file":6071,"_stem":6072,"_extension":20},"/en-us/blog/feature-flags-continuous-delivery",{"title":6057,"description":6058,"ogTitle":6057,"ogDescription":6058,"noIndex":6,"ogImage":6059,"ogUrl":6060,"ogSiteName":692,"ogType":693,"canonicalUrls":6060,"schema":6061},"Learn more about Feature Flags: The next step in Progressive Delivery","How Feature Flags are continuing the next evolution of continuous delivery.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670020/Blog/Hero%20Images/feature-flags.jpg","https://about.gitlab.com/blog/feature-flags-continuous-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn more about Feature Flags: The next step in Progressive Delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-08-06\",\n      }",{"title":6057,"description":6058,"authors":6063,"heroImage":6059,"date":6064,"body":6065,"category":14,"tags":6066},[4535],"2019-08-06","\n\n[DevOps](/topics/devops/) is always evolving. Continuous delivery made a major impact on the way software is deployed, but we don’t think the innovation stops there. As we move into more of a [multi-cloud](/topics/multicloud/), hybrid development world, continuous delivery has continued to change into something more “progressive.”\n\n[Progressive Delivery](https://redmonk.com/jgovernor/2018/08/06/towards-progressive-delivery/) isn’t exactly the new idea that continuous delivery was; it’s simply a continuation of it. What Progressive Delivery does is give more precision to the delivery process through new ideas and best practices, reducing the risk of one big, all-or-nothing deployment. At GitLab, we think Progressive Delivery is the next logical evolution of DevOps beyond CI/CD and will become the default way to release software in the future.\n\nWe previously discussed [how Review Apps can enable Progressive Delivery](/blog/progressive-delivery-using-review-apps/), and today we’ll discuss the targeted rollout process of Feature Flags.\n\n## What are Feature Flags?\n\n[Feature Flags](/direction/release/feature_flags/) (also known as feature toggles, feature flippers, or feature switches) give developers the ability to roll out features selectively without changing the source code. Incomplete features can be merged into the production code but flagged on or off, which allows many small, incremental versions of software to be delivered without the cost of constant branching and merging.\n\nFeature Flags are designed to minimize the blast radius of releasing new features. 
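\n\nAt the code level, a Feature Flag is usually nothing more than a guarded branch. Here is a minimal sketch in Python (the in-memory flag store and all names are hypothetical, not GitLab's implementation); note that the off path preserves the existing behavior:\n\n```\n# Hypothetical in-memory flag store; a real system would query a flag service\nFLAGS = {'new_checkout_flow': False}\n\ndef is_enabled(name):\n    # Unknown or disabled flags fall back to the legacy code path\n    return FLAGS.get(name, False)\n\ndef legacy_checkout(cart):\n    return 'legacy checkout of %d items' % len(cart)\n\ndef new_checkout(cart):\n    return 'new checkout of %d items' % len(cart)\n\ndef checkout(cart):\n    if is_enabled('new_checkout_flow'):\n        return new_checkout(cart)  # new behavior, dark-launched behind the flag\n    return legacy_checkout(cart)   # existing behavior ships unchanged\n```\n\nFlipping the single flag entry turns the new behavior on everywhere, and a real flag service can make that decision per user or per environment instead.\n\n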
By utilizing Feature Flags, developers can release to a subset of users and roll back easily through toggling, leaving the live code intact. A feature can also be tested before it’s completed and ready for release. This technique allows developers to release a version of a product that has unfinished features that are hidden (toggled off) so they do not appear in the user interface.\n\n[Martin Fowler organizes Feature Flags into four different categories](https://martinfowler.com/articles/feature-toggles.html) based on how long they’re typically in place and how dynamic they should be:\n\n*   **Release toggles**: A temporary flag which allows incomplete, latent code to be shipped to production and turned on or off, or perhaps never enabled at all.\n*   **Experiment toggles**: A short-lived toggle usually used for multivariate A/B testing, kept in place only long enough to gather results.\n*   **Ops toggles**: For releases that have unclear performance implications, this toggle allows system administrators to roll back quickly, but it’s not unheard of for long-term toggles to remain in place as a kill switch.\n*   **Permission toggles**: Manage features for specific users, such as “premium” features, alpha or beta features, or even internal features. These toggles can be very long-lived.\n\nFeature Flags can act as a lightweight form of [version control](/topics/version-control/) for application behavior, helping [continuous delivery](/topics/continuous-delivery/) remain continuous. The ability to turn them on or off with simple commands makes Feature Flags a low-risk option for introducing new features. While they’re easy to use, they can have some drawbacks if not implemented properly.\n\n## Working with Feature Flags\n\nSome worry about the added complexity with Feature Flags, since code may need to be tested with toggles on and off, essentially doubling the load. While it’s not necessary to test every toggle configuration, a best practice is for developers to test code that has the greatest likelihood of going live in production. According to Martin Fowler, a good convention is to enable existing or legacy behavior when a Feature Flag is Off, and new or future behavior when it's On.\n\nAnother risk of using Feature Flags is stale flags, a situation in which flags are left in the code and forgotten about. As teams add more and more flags into their code, it can become harder to keep track of and verify the flags.\n\nToday, organizations rely on feature management systems such as [LaunchDarkly](https://launchdarkly.com/) or [Optimizely](https://blog.optimizely.com/2017/10/18/feature-management/) in order to use Feature Flags. As with any link in a toolchain, this adds an additional level of oversight that can be hard to manage and maintain. Analysts recognize that feature-toggling capabilities are becoming a fundamental requirement for a continuous delivery platform. While we are still in the early stages of Feature Flags, we do have some alpha Feature Flag capabilities already built into GitLab that you can try out today, and we will be launching additional functionality in 12.2:\n\n*   [Feature Flags enabled for specific users](https://gitlab.com/gitlab-org/gitlab-ee/issues/11459)\n*   [Percent rollout per environment](https://gitlab.com/gitlab-org/gitlab-ee/issues/8240)\n\n## GitLab and Progressive Delivery\n\nAs we continue to iterate on our [product vision for CI/CD](/direction/ops/#progressive-delivery), we’re adopting a Progressive Delivery mindset for how we implement new features into GitLab. 
As a complete [DevOps platform](/solutions/devops-platform/), delivered as a [single application](/topics/single-application/), it’s important for us to offer a comprehensive solution that reflects the latest best practices. Review Apps, Canary Deployments, and Feature Flags are just some of the ways we’re bringing Progressive Delivery to the GitLab community.\n\nTo learn more about how we’re using Feature Flags and Feature Flag best practices in GitLab, watch this deep dive with our Director of Product Management, [Jason Yavorska](/company/team/#jyavorska).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/TSSqNUhbbmQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nFeature Flags can be a useful way to validate and measure performance before rolling out a feature to a broader audience. High visibility makes DevOps more efficient, and integrating Feature Flags into the same application where your code repositories, CI/CD, project planning, and monitoring live can overcome many of the challenges associated with Feature Flags.\n\nLearn how GitLab’s built-in CI/CD helps teams implement Progressive Delivery tools such as [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/), [Feature Flags](/direction/release/feature_flags/), and [Canary Deployments](https://docs.gitlab.com/ee/user/project/canary_deployments.html), without the complicated integrations and plugin maintenance.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Chris Lawton](https://unsplash.com/@chrislawton?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/flags?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[110,749],{"slug":6068,"featured":6,"template":678},"feature-flags-continuous-delivery","content:en-us:blog:feature-flags-continuous-delivery.yml","Feature Flags Continuous Delivery","en-us/blog/feature-flags-continuous-delivery.yml","en-us/blog/feature-flags-continuous-delivery",{"_path":6074,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6075,"content":6081,"config":6086,"_id":6088,"_type":16,"title":6089,"_source":17,"_file":6090,"_stem":6091,"_extension":20},"/en-us/blog/quantifying-ux-positioning-of-the-clone-button",{"title":6076,"description":6077,"ogTitle":6076,"ogDescription":6077,"noIndex":6,"ogImage":6078,"ogUrl":6079,"ogSiteName":692,"ogType":693,"canonicalUrls":6079,"schema":6080},"Quantifying UX: Positioning the clone button","We wanted to move the clone button on the project overview page. Here's how user testing helped us make the right choices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672622/Blog/Hero%20Images/positioning-clone-button.jpg","https://about.gitlab.com/blog/quantifying-ux-positioning-of-the-clone-button","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quantifying UX: Positioning the clone button\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matej Latin\"}],\n        \"datePublished\": \"2019-07-26\",\n      }",{"title":6076,"description":6077,"authors":6082,"heroImage":6078,"date":6083,"body":6084,"category":14,"tags":6085},[4066],"2019-07-26","\nWe recently redesigned GitLab's project overview page in an effort to make it easier to read. 
We wanted\nto make it simple for users to understand what the project is about and to get a quick overview of\nits status and activity. We considered moving the clone button further down the page,\nbut decided to put a smaller version in the header instead. The logic behind this decision:\n*Things further down the page are harder to find.*\n\n![GitLab's project overview before the most recent redesign](https://about.gitlab.com/images/blogimages/clone-button-positioning/01.jpg){: .medium.center}\n\nThe original project overview page. Lack of structure and an unclear information architecture were\n  two major problems.\n  {: .note.text-center}\n\nWe know one of the main things users want to do on the project overview page is *clone the project*.\nWe were already changing the UI so we would hide both clone URLs (HTTPS and SSH) behind a\ndedicated “clone” button, but we were concerned that change would have a negative\nimpact on the discoverability of the cloning options.\n\n![Redesigned project overview page](https://about.gitlab.com/images/blogimages/clone-button-positioning/02.jpg){: .medium.center}\n\nThe redesigned project overview page that is currently live.\n{: .note.text-center}\n\nWe received some negative feedback after the change but nothing that was too serious. The feedback was mostly about\nhaving to make an additional click to get to what the user wants. We concluded\nit was a compromise we could live with.\n\n## Moving the clone button\n\nBut after a while, we started receiving more feedback and suggestions\nto [move the clone button down to the file tree control area](https://gitlab.com/gitlab-org/gitlab-ce/issues/60022).\nThe initial suggestion was made because the recent redesign of the project overview page made\nthe clone button completely disappear from the repository page. Removing it from\nthe file tree section in one place removed it from all occurrences of this UI pattern.\n\n![New position for the clone button](https://about.gitlab.com/images/blogimages/clone-button-positioning/03.jpg){: .medium.center}\n\nThe proposal suggested we move the clone button down to the file tree controls.\n{: .note.text-center}\n\nI remembered the negative feedback we received for our most recent change so I wanted to\nmake our decision with some research. I quickly created a [UsabilityHub](https://usabilityhub.com) click test\nthat would tell us if the discoverability of the button worsened by moving it further down the page. The test was\nsimple: show the new design and ask the participants one\nquestion – *Where would you click to copy (and sync) this repository to your local machine?*\nOur UX research team helped me shape the question so that it wasn’t leading (we couldn’t use\nthe word “clone”). We would also run a control test with the live design – the one where\nthe clone button is in the header – so that we could have a baseline for comparison.\n\n![The click test](https://about.gitlab.com/images/blogimages/clone-button-positioning/click-test.gif){: .medium.center}\n\nThis is what solving a click test looked like.\n{: .note.text-center}\n\nAs I was working on the test, I thought it was going to further validate the recent change where\nwe moved the clone button to the header. It makes sense: If a dark blue button is on the\ntop right on a page, it’s easier to notice than if it’s further down or possibly below the fold.\nBut then I remembered that other Git platforms (most notably GitHub) have the clone button in the same\nplace we were considering. 
The test went live and I had no idea what to expect. We soon collected\naround 40 answers to each of the two variations and we felt that was enough to draw conclusions.\n\nThe results were surprising.\n\n![The results of the test](https://about.gitlab.com/images/blogimages/clone-button-positioning/04.jpg){: .medium.center}\n\nThe results of the new design on the left and the current one on the right.\n{: .note.text-center}\n\n| Version | Correct answers | Time required |\n| ------- | ---- | --------------|\n| New | 98%    | 15s         |\n| Current | 84%    | 21s         |\n\nAlmost all participants (98%) answered correctly in the new design compared to 84% in the current design.\nAnd in the new design it took them six seconds less to answer – 15 seconds instead of 21. So this means it\nmakes sense to move the clone button to the file tree controls and reintroduce it on the repository page.\nIt’s a win-win. No compromises there. But what can we do when the repository of a project\nis empty? We show different information on that page when a repository is empty and the layout of\nthe page is slightly different too.\n\n## Cloning an empty repository\n\nSo we solved one part of the problem and now it was time to solve the other part. When the\nrepository of a project is empty we show instructions on how to use it.\nCloning instructions are included as well but there’s no button in the cloning instructions or\nanywhere close. So far we didn’t really need one as we had one in the header.\n\n![Current empty repository page layout](https://about.gitlab.com/images/blogimages/clone-button-positioning/05.jpg){: .medium.center}\n\nCurrent empty repository project overview page.\n{: .note.text-center}\n\nBut moving that button down to the file tree controls now meant we wouldn’t have a button in\nthe header anymore. This same scenario applies to the empty repository too! So what should we do? What\nwould happen if we completely removed it?\n\n![Empty repository page without the clone button](https://about.gitlab.com/images/blogimages/clone-button-positioning/06.jpg){: .medium.center}\n\nEmpty repository project overview page without the clone button. Will removing\n  it have a profoundly negative effect on user experience?\n  {: .note.text-center}\n\nThis was another question we could answer with a quick test. I created two variations of the\ntest – one with the button in the header (current design) and one without it (new design). We would\nshow one of the variations to a participant and ask: *Where would you find the\ninformation for copying (and syncing) this repository to your local machine?*\n\nYou’re probably thinking the result of this test should be obvious – the variation\nwith the button should win. We were thinking that too, but we wanted to see what the difference was.\nWe wanted to quantify it so we could make an informed decision. If the results were really\nbad, we would consider adding a clone button to the instructions area. This solution felt a bit\nodd so we wanted to make sure it was the right thing to do.\n\n![Results of the second test](https://about.gitlab.com/images/blogimages/clone-button-positioning/07.jpg){: .medium.center}\n\nResults of the new design (without the button) on the left and the current design (with the button)\n  on the right.\n  {: .note.text-center}\n\nAnd yes, the results were what we expected. Just over three-quarters of users (77%) answered\ncorrectly in the current design and it took them 16 seconds. 
Removing the button altogether meant\nonly 50% of users found the cloning information and it took them 37 seconds. That’s 21 seconds longer!\nWe concluded removing the button had a very negative impact on user experience so we decided\nto introduce a clone button in the instructions area.\n\n| Version | Correct answers | Time required |\n| ------- | ---- | --------------|\n| New | 50%    | 37s         |\n| Current | 77%    | 16s         |\n\n![New design for the empty repository page](https://about.gitlab.com/images/blogimages/clone-button-positioning/08.jpg){: .medium.center}\n\nIn the end, we decided to add the clone button on top of the instructions sections, where\n  all other buttons already are.\n  {: .note.text-center}\n\nThe solution is [currently being implemented by a member of our awesome\ncommunity](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/27754) and we’re looking forward\nto seeing this change live!\n\nRead my previous [Quantifying UX blog post about redesigning GitLab's settings pages](/blog/quantifying-ux-validating-the-redesign-of-gitlabs-settings-pages/).\n\nCover image by [David Travis](https://unsplash.com/@dtravisphd?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[915,1144,959],{"slug":6087,"featured":6,"template":678},"quantifying-ux-positioning-of-the-clone-button","content:en-us:blog:quantifying-ux-positioning-of-the-clone-button.yml","Quantifying Ux Positioning Of The Clone Button","en-us/blog/quantifying-ux-positioning-of-the-clone-button.yml","en-us/blog/quantifying-ux-positioning-of-the-clone-button",{"_path":6093,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6094,"content":6100,"config":6106,"_id":6108,"_type":16,"title":6109,"_source":17,"_file":6110,"_stem":6111,"_extension":20},"/en-us/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol",{"title":6095,"description":6096,"ogTitle":6095,"ogDescription":6096,"noIndex":6,"ogImage":6097,"ogUrl":6098,"ogSiteName":692,"ogType":693,"canonicalUrls":6098,"schema":6099},"Moving workflows to GitLab: The case of the HIPAA Audit Protocol","With the GitLab API, you can easily move workflows into GitLab. Here’s how we did it for the HIPAA Audit Protocol.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679716/Blog/Hero%20Images/bright-cardiac-cardiology.jpg","https://about.gitlab.com/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Moving workflows to GitLab: The case of the HIPAA Audit Protocol\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Luka Trbojevic\"}],\n        \"datePublished\": \"2019-07-25\",\n      }",{"title":6095,"description":6096,"authors":6101,"heroImage":6097,"date":6103,"body":6104,"category":14,"tags":6105},[6102],"Luka Trbojevic","2019-07-25","\n\nUsing GitLab for just about everything we do, I’ve seen firsthand how powerful and effective\nit can be as a project management tool.\n\n**However, in speaking with folks about adopting GitLab for their own non-development\nworkflows, the most common roadblock I hear is the lack of specific examples.** If you're not\na developer or otherwise don't work with GitLab often, it can be hard to see how all the\nfeatures and capabilities fit together to go from an idea to a functional workflow. 
Because\nof this, I thought it was important to create a specific, real-world example for something most\nfolks can relate to: an audit.\n\nThe [HIPAA Audit Protocol](https://www.hhs.gov/hipaa/for-professionals/compliance-enforcement/audit/protocol/index.html)\nis published by the U.S. Department of Health & Human Services and is used by the\nOffice for Civil Rights as part of its HIPAA compliance enforcement efforts. The\nHIPAA Audit Protocol currently exists as a table on the HHS website and is most commonly turned\ninto a spreadsheet. But there are limitations and inefficiencies to working with an audit\nprotocol in a spreadsheet, as compared to GitLab:\n\n* Collaboration is challenging in a spreadsheet. With issues, robust and well-organized conversations are easy.\n* You can't upload audit protocol files directly to the spreadsheet. With issues, you can upload files directly.\n* There's no simple, clean way to maintain a full, comprehensive change history in a\nspreadsheet. With issues and merge requests, change history and logging are built in directly.\n* It can be challenging to manage due dates, milestones, and work assignments in a\nspreadsheet. With issues, those are all native features.\n\n## Introducing the HIPAA Audit Protocol Issue Generator\n\nThe HIPAA Audit Protocol Issue Generator is a simple Python script using the\n`python-gitlab` API wrapper to create issues out of every audit inquiry in the protocol.\nThe purpose of the script is to highlight how easy it is to use the GitLab API to move\nworkflows inside of GitLab and leverage GitLab's project management capabilities.\n\nThe best part? Creating this tool was simple. I started by copying the\nHTML table of the audit protocol into a CSV. Then I wrote a simple loop to go through each\nrow in the CSV and automatically create an associated issue. Note: While you can use the\nissue importer, you’d have to create a very strictly structured and formatted import file.\nWith the GitLab API, you have more flexibility and the output is easier to work with.\n\nFor your use, we’ve made\nthe [full script and a CSV of the audit protocol](https://gitlab.com/ltrbojevic/hipaa-audit-protocol-issue-generator)\navailable.\n\nHere’s what an issue looks like:\n\n![sample issue](https://about.gitlab.com/images/blogimages/HIPAA-audit-protocol-example.png){: .shadow.medium.center}\n\nAll the information you need is front and center and the issues are labeled.\n{: .note.text-center}\n\n### Customizing the issue structure\n\nFirst, it’s important to understand how we load the data from the CSV. There are different\nways to do it, but I like to assign every column in the CSV to a variable, then pass those\nvariables to the issue create API call. For this script, we have:\n\n```\nimport csv\n\n# Loop over each row of the audit protocol CSV (headers removed; filename illustrative),\n# assigning every column to a named variable\nwith open('hipaa_audit_protocol.csv') as csv_file:\n    for col in csv.reader(csv_file):\n        audit_type = col[0]\n        section = col[1]\n        key_activity = col[2]\n        established_performance_criteria = col[3]\n        audit_inquiry = col[4]\n        required_addressable = col[5]\n```\n\nThat means `audit_type` is the first column, `section` is the second column, and so on.\n\nThese variables then get used in the issue create API call. For this script, we have:\n\n```\n# 'project' is a python-gitlab Project object, obtained with e.g.\n# gitlab.Gitlab(url, private_token=token).projects.get(project_id)\nissue = project.issues.create({'title': key_activity,\n'description': '## Established Performance Criteria' + '\\n' +\nestablished_performance_criteria + '\\n' + '## Audit Inquiry' +\n'\\n' + audit_inquiry,\n'labels': [audit_type] + [required_addressable] + [section]})\n```\n\n#### Title\n\nI decided to use the respective Key Activity of every audit inquiry. 
In the issue create API\ncall, it looks like this:\n\n` 'title': key_activity, `\n\nYou can make the title anything you want. In this case I just used a column from the CSV, but I\ncould start or end the title with some other text not in the CSV, like:\n\n` 'title': 'Any text you want: ' + key_activity, `\n\n#### Description\n\nI used a string to manually write out the headers for each section and filled them in\nusing the data from the CSV. In the issue create API call, it looks like this:\n\n` 'description': '## Established Performance Criteria' + '\\n' + established_performance_criteria + '\\n' + '## Audit Inquiry' + '\\n' + audit_inquiry, `\n\nNotice how I use the newline. Without the newline, all of the data would run together in the description\nand it would be unreadable. You can add as many newlines as you want,\nand if you’re planning on doing regular editing of the issue description itself, consider\nadding two newlines to create a new paragraph so the issue description is more readable in edit mode.\n\n#### Labels\n\nLabels are very helpful for organizing, searching, filtering, and creating boards. For the labels,\nI opted to use Audit Type, Required/Addressable, and Section. In the issue create API call, it looks like this:\n\n` 'labels': [audit_type] + [required_addressable] + [section] `\n\nYou can also add your own labels to the CSV by creating a new column and adding the labels you\nwant for every given row, or you can add a static label applied to all the issues by adding it\nto the API call. Make sure to keep the variables in brackets or the string will split\n(for example, instead of `Privacy` it will create a label for each letter in the word `Privacy`).\n\n#### Adding other sections\n\nYou can customize the script to add any other sections allowed by\nthe [GitLab API](https://docs.gitlab.com/ee/api/). Because we’re working with issues,\nthe [GitLab Issues API](https://docs.gitlab.com/ee/api/issues.html) documentation will be\nhelpful. The [`python-gitlab` documentation](https://python-gitlab.readthedocs.io/en/stable/index.html) is\nalso a great resource, given that this script makes use of it.\n\n## Making your own workflows in GitLab\n\nWhile in this blog post I've focused on the HIPAA Audit Protocol and the issue generator\nscript, it is also a practical, hands-on example of how simple it is to use the\nGitLab API to move any workflow to GitLab. There are two primary components:\n\n1. A data source (I prefer CSV files)\n2. A crafted API call to use the data source to bring the data into GitLab\n\nI think of the data source as the thing I want in GitLab and the crafted API call as the\nvehicle to get it into GitLab. Think of a row in your CSV as an issue and the columns as the\nthings you’re putting in the issue.\n\nWhile my specific example was the audit protocol, we can use this strategy for just about anything.\nSome examples are risk assessments, gap analyses, event planning, product launches, and more.\n\nTo adapt this script for other workflows:\n1. Start by getting your data into a CSV. Be sure to remove your headers before running the script\nbecause the script doesn’t account for column headers as is!\n2. 
Modify the variables and issue create API call we talked about in\nthe [Customizing the issue structure section above](#customizing-the-issue-structure) to match with your CSV and data.\n\nAnd that’s really it!\n\nAt GitLab, we use the\n[simplest and most boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions)\nto solve problems. With this approach, we were able to turn an HTML table of the HIPAA Audit Protocol\non the HHS website into a functional workflow within GitLab in just a few\nhours (including API research time and CSV formatting) and in 42 lines of code (including comments).\nTo add to that, the script can be repurposed for just about any other workflow. Plus, the script is\navailable for anyone to download, use, and modify in any way, and this blog post can serve as a\nguide on how to do that.\n\nAs a long-term solution to bring this functionality into\nGitLab as a feature, there’s also an\n[open issue to collect feedback on creating\na marketplace for issue templates](https://gitlab.com/gitlab-org/gitlab-ce/issues/62895). If you have any suggestions or comments about the marketplace\nidea, feel free to post them in the issue!\n\nHave a specific question you want answered or want to get feedback on a specific use case? Comment below!\n\n### Disclaimer\nTHE INFORMATION PROVIDED ON THIS WEBSITE IS TO BE USED FOR INFORMATIONAL PURPOSES ONLY. THE\nINFORMATION SHOULD NOT BE RELIED UPON OR CONSTRUED AS LEGAL OR COMPLIANCE ADVICE OR OPINIONS.\nTHE INFORMATION IS NOT COMPREHENSIVE AND WILL NOT GUARANTEE COMPLIANCE WITH ANY REGULATION OR\nINDUSTRY STANDARD. YOU MUST NOT RELY ON THE INFORMATION FOUND ON THIS WEBSITE AS AN\nALTERNATIVE TO SEEKING PROFESSIONAL ADVICE FROM YOUR ATTORNEY AND/OR COMPLIANCE PROFESSIONAL.\n{: .note}\n\nPhoto by [Pixabay](https://www.pexels.com/photo/bright-cardiac-cardiology-care-433267/) on [Pexels](https://www.pexels.com)\n{: .note}\n",[915,703,1307],{"slug":6107,"featured":6,"template":678},"moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol","content:en-us:blog:moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol.yml","Moving Workflows To Gitlab The Case Of The Hipaa Audit Protocol","en-us/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol.yml","en-us/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol",{"_path":6113,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6114,"content":6119,"config":6124,"_id":6126,"_type":16,"title":6127,"_source":17,"_file":6128,"_stem":6129,"_extension":20},"/en-us/blog/cross-project-pipeline",{"title":6115,"description":6116,"ogTitle":6115,"ogDescription":6116,"noIndex":6,"ogImage":5923,"ogUrl":6117,"ogSiteName":692,"ogType":693,"canonicalUrls":6117,"schema":6118},"How to trigger multiple pipelines using GitLab CI/CD","Discover how to trigger and visualize pipelines when you set up GitLab CI/CD across multiple projects.","https://about.gitlab.com/blog/cross-project-pipeline","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to trigger multiple pipelines using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2019-07-24\",\n      }",{"title":6115,"description":6116,"authors":6120,"heroImage":5923,"date":6121,"body":6122,"category":14,"tags":6123},[1019],"2019-07-24","\n[Continuous integration (CI)](/solutions/continuous-integration/) is the practice of [automating code building and 
testing](/topics/ci-cd/) before it is\nmerged into the master or default branch. This allows developers to merge code early and frequently, while\nmitigating the risk of introducing new bugs into the master source code repository.\n\nWhile CI verifies that new code won't break when integrated with other code in the same repo, having\nall tests pass on that repo is only the first step. After running CI on the code, it is important to\ndeploy and run tests in a live environment. Moving from [CI to continuous delivery and deployment (CD)](/solutions/continuous-integration/)\nis [the next step of DevOps maturity](/topics/devops/). Deploying and then testing again allows code in one project\nto be tested together with other components and services that may be managed in other projects.\n\n## Why do I need to verify that my code works with other components?\n\nA good example is a\nmicroservices architecture. Usually, different [microservices](/topics/microservices/) are managed in\ndifferent [projects](https://docs.gitlab.com/ee/user/project/) – each microservice has its own\nrepository and own pipeline. It's also very common for different teams to be\nresponsible for different microservices and their pipeline configurations. As a developer you will\nwant to confirm that your code changes don't break the functionality of dependent microservices.\nTherefore, you will want to execute tests on those microservices in addition to your project tests.\n\n## The cross-project pipeline\n\nWhen running your [project pipeline](/topics/ci-cd/cicd-pipeline/), you also want to trigger cross-project or multi-project pipelines,\nwhich will eventually deploy and test the latest version of all dependent microservices. To\nachieve this goal you need an easy, flexible, and convenient way to trigger other\npipelines as part of your project CI. GitLab CI/CD offers an easy way to run a cross-project\npipeline by simply adding a pipeline trigger job in the CI configuration file.\n\n## GitLab CI/CD configuration file\n\nIn GitLab CI/CD, pipelines, and their component jobs and stages, are defined in\nthe [`.gitlab-ci.yml`](https://docs.gitlab.com/ee/ci/yaml/) file for each project. The\nfile is part of the project repository. It is fully versioned and developers can edit it with any\ncommon IDE of their choice. They do not have to ask the system admin or DevOps team to make\nchanges in the pipeline configuration as it is self-service. The `.gitlab-ci.yml` file defines the structure\nand order of the pipelines and determines what to execute\nusing [GitLab Runner](https://docs.gitlab.com/runner/) (the agent that runs the jobs), and what\ndecisions to make when specific conditions are encountered, like when a process succeeds or fails.\n\n## Add a cross-project pipeline triggering job\n\nSince GitLab 11.8, GitLab has provided a CI/CD configuration syntax for triggering cross-project\npipelines directly from the [pipeline configuration file](https://docs.gitlab.com/ee/ci/yaml/).\nThe following code illustrates configuring a bridge job to trigger a downstream pipeline:\n\n```\n# job1 is a job in the upstream project\ndeploy:\n  stage: Deploy\n  script: echo 'this is my script'\n\n# job2 is a bridge job in the upstream project which triggers a cross-project pipeline\nAndroid:\n  stage: Trigger-cross-projects\n  trigger: mobile/android\n```\n\nIn the example above, as soon as the deploy job succeeds in the deploy stage, the Android\nbridge job will start. The initial status of this job will be pending. 
GitLab will\ncreate a downstream pipeline in the mobile/android project and, as soon as the pipeline gets created,\nthe Android job will succeed. In this case, `mobile/android` is the full path to that project.\n\nThe user who created the upstream pipeline needs to have access rights to the downstream\nproject (mobile/android in this case). If a downstream project cannot be found, or the user does not\nhave access rights to create a pipeline there, the Android job will be marked as failed.\n\n## Browse from upstream pipeline graphs to downstream\n\nGitLab CI/CD makes it possible to visualize the pipeline configuration. In the below illustration, the\nbuild, test, and deploy stages are part of the upstream project. Once the deploy job succeeds, four\ncross-project pipelines will be triggered in parallel and you will be able to browse to them by clicking on\none of the downstream jobs.\n\n![Build, test and deploy stages](https://about.gitlab.com/images/blogimages/Cross-proj-img1.png){: .shadow.medium.center}\n\nIn the below illustration the Service – Finance downstream pipeline is visible. We can now scroll\nleft to the upstream pipeline, scroll right back to the downstream pipeline, or select another\ndownstream pipeline.\n\n![Service – Finance pipeline](https://about.gitlab.com/images/blogimages/Cross-proj-img2.png){: .shadow.medium.center}\n\n## Specifying a downstream pipeline branch\n\nIt is possible to specify a branch name that a downstream pipeline will use:\n\n```\nAndroid:\n  trigger:\n    project: mobile/android\n    branch: stable-11-2\n```\n\nUse the `project` keyword to specify the full path to a downstream project. Use the `branch` keyword to\nspecify a branch name. GitLab will use the commit that is currently at the HEAD of that branch\nwhen creating the downstream pipeline.\n\n## Passing variables to a downstream pipeline\n\nSometimes you might want to pass variables to a downstream pipeline. You can do that using\nthe `variables` keyword, just like you would when defining a regular job.\n\n```\nAndroid:\n  variables:\n    ENVIRONMENT: 'This is the variable value for the downstream pipeline'\n  stage: Trigger-cross-projects\n  trigger: mobile/android\n```\n\nThe `ENVIRONMENT` variable will be passed to every job defined in the downstream pipeline. It will be\navailable as an environment variable when GitLab Runner picks up the job.\n\n## Cross-project pipeline summary\n\nThe `.gitlab-ci.yml` file defines the order of the CI/CD stages, which jobs to execute, and under which\nconditions to run or skip a job's execution. Adding a 'bridge job' with the `trigger` keyword to\nthis file lets you trigger cross-project pipelines. We can pass parameters to jobs in\ndownstream pipelines, and even define a branch that a downstream pipeline will use.\n\nPipelines can be complex structures with many sequential and parallel jobs, and as we just\nlearned, sometimes they can trigger downstream pipelines. To make it easier to understand the\nflow of a pipeline, including its downstream pipelines, GitLab has pipeline graphs for viewing\npipelines and each pipeline's status.\n\n![Service – Finance pipeline](https://about.gitlab.com/images/blogimages/Cross-proj-img4.png){: .shadow.medium.center}\n\nHey community, what else would you like me to explain in a blog post? 
Let me know in the comments or tweet us [@gitlab](https://twitter.com/gitlab).\n\nCover image by [Tian Kuan](https://unsplash.com/@realaxer) on [Unsplash](https://unsplash.com)\n{: .note}\n",[110,894,749,1328,727],{"slug":6125,"featured":6,"template":678},"cross-project-pipeline","content:en-us:blog:cross-project-pipeline.yml","Cross Project Pipeline","en-us/blog/cross-project-pipeline.yml","en-us/blog/cross-project-pipeline",{"_path":6131,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6132,"content":6138,"config":6143,"_id":6145,"_type":16,"title":6146,"_source":17,"_file":6147,"_stem":6148,"_extension":20},"/en-us/blog/three-new-support-tools",{"title":6133,"description":6134,"ogTitle":6133,"ogDescription":6134,"noIndex":6,"ogImage":6135,"ogUrl":6136,"ogSiteName":692,"ogType":693,"canonicalUrls":6136,"schema":6137},"We've open sourced 3 tools to help troubleshoot system performance","Say hello to the open source tools our Support team is using to better summarize customer performance data – and find out how they can help you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670405/Blog/Hero%20Images/open_source_tools.jpg","https://about.gitlab.com/blog/three-new-support-tools","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We've open sourced 3 tools to help troubleshoot system performance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Will Chandler\"},{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-24\",\n      }",{"title":6133,"description":6134,"authors":6139,"heroImage":6135,"date":6121,"body":6141,"category":14,"tags":6142},[6140,3676],"Will Chandler","\nOur self-managed customers often encounter issues related to performance, or the time it takes to execute something. In the past, the [Support team](/handbook/support/) had to pull data from disparate sources and cobble it together in order to analyze performance-related issues.\n\n“We’re dealing with someone else’s computer on support, so we have to be able to handle environments with limited observability,” says [Will Chandler](/company/team/#wchandler), senior support engineer. “We’re at the mercy of their infrastructure. That’s why the team has made tools to reduce the friction.”\n\n“With [GitLab.com](/pricing/), we have all of this fancy tooling that helps us collect performance data,” says [Lee Matos](/company/team/#leematos), support engineering manager. “But when we’re working with customers, we need to be ready to bring lightweight tools that don’t require a lot of setup that we can use based on what they have in place.”\n\nThe Support team is working on becoming more data driven by using three new tools designed to aggregate and summarize performance data for self-managed customers. A focus on data-driven decision-making improves the customer relationship and demonstrates our commitment to making performance a key feature of GitLab.\n\nWe'll look at three open source tools created by GitLab Self-Managed Support. Strace parser is a general tool that could be of use to anyone, while JSON Stats and GitLabSOS are tailored to GitLab, but could be easily modified.\n\n## 1. 
[Strace parser](https://gitlab.com/gitlab-com/support/toolbox/strace-parser)\n\n[Strace](https://gitlab.com/strace/strace) is a commonly used debugging and diagnostic tool in Linux that captures information about what’s happening inside processes running in our customers’ environments.\n\nUnlike [newer](http://man7.org/linux/man-pages/man1/perf.1.html) and [more powerful](https://github.com/iovisor/bpftrace) tracing tools, strace adds [significant overhead to a process](http://www.brendangregg.com/blog/2014-05-11/strace-wow-much-syscall.html). However, strace is generally available even on very old versions of Linux.\n\nAn strace of a single-threaded program is linear, but following the threads of execution quickly gets difficult when there are many processes being captured. At GitLab Support we are typically tracing [Unicorn](https://bogomips.org/unicorn/) workers or [Gitaly](https://gitlab.com/gitlab-org/gitaly), which are highly concurrent, resulting in hundreds of process IDs being traced and hundreds of thousands of lines of output from traces only a few seconds long.\n\nWill built [strace parser](https://gitlab.com/gitlab-com/support/toolbox/strace-parser) for these types of use cases. Strace parser summarizes the most meaningful data from an strace in a more accessible format, allowing users to find the critical sections of the data quickly.\n\nThe next two examples are from a GitLab customer that was using a very slow file system to host their .gitconfig file, which was a major performance bottleneck. But it was not immediately clear what was happening from the perspective of a user trying to troubleshoot. By running an strace on Gitaly, we were able to get a better understanding of why the system was so slow.\n\n```\n3694  13:45:06.207369 clock_gettime(CLOCK_MONOTONIC, {3016230, 201254200}) = 0 \u003C0.000015>\n3694  13:45:06.207409 futex(0x7f645bb49664, FUTEX_WAIT_BITSET_PRIVATE, 192398, {3016230, 299906871}, ffffffff \u003Cunfinished ...>\n3542  13:45:06.209616 \u003C... futex resumed> ) = -1 ETIMEDOUT (Connection timed out) \u003C0.005236>\n3542  13:45:06.209639 futex(0x1084ff0, FUTEX_WAKE, 1) = 1 \u003C0.000023>\n3510  13:45:06.209673 \u003C... futex resumed> ) = 0 \u003C0.002909>\n3542  13:45:06.209701 futex(0xc420896548, FUTEX_WAKE, 1 \u003Cunfinished ...>\n3510  13:45:06.209710 pselect6(0, NULL, NULL, NULL, {0, 20000}, NULL \u003Cunfinished ...>\n16780 13:45:06.209740 \u003C... futex resumed> ) = 0 \u003C0.002984>\n3542  13:45:06.209749 \u003C... futex resumed> ) = 1 \u003C0.000043>\n16780 13:45:06.209776 pselect6(0, NULL, NULL, NULL, {0, 3000}, NULL \u003Cunfinished ...>\n3542  13:45:06.209787 futex(0xc420053548, FUTEX_WAKE, 1 \u003Cunfinished ...>\n16780 13:45:06.209839 \u003C... pselect6 resumed> ) = 0 (Timeout) \u003C0.000056>\n3544  13:45:06.209853 \u003C... futex resumed> ) = 0 \u003C0.003148>\n3542  13:45:06.209861 \u003C... futex resumed> ) = 1 \u003C0.000069>\n3510  13:45:06.209868 \u003C... pselect6 resumed> ) = 0 (Timeout) \u003C0.000151>\n3544  13:45:06.209915 epoll_ctl(4\u003Canon_inode:[eventpoll]>, EPOLL_CTL_DEL, 181\u003CUNIX:[164869291]>, 0xc42105bb14 \u003Cunfinished ...>\n16780 13:45:06.210076 write(1\u003Cpipe:[55447]>, \"time=\\\"2019-02-14T18:45:06Z\\\" level=warning msg=\\\"health check failed\\\" error=\\\"rpc error: code = DeadlineExceeded desc = context deadline exceeded\\\" worker.name=gitaly-ruby.4\\n\", 170 \u003Cunfinished ...>\n3544  13:45:06.210093 \u003C... 
epoll_ctl resumed> ) = 0 \u003C0.000053>\n3542  13:45:06.210101 futex(0x1089020, FUTEX_WAIT, 0, {0, 480025102} \u003Cunfinished ...>\n3510  13:45:06.210109 pselect6(0, NULL, NULL, NULL, {0, 20000}, NULL \u003Cunfinished ...>\n16780 13:45:06.210153 \u003C... write resumed> ) = 170 \u003C0.000064>\n3544  13:45:06.210163 close(181\u003CUNIX:[164869291]> \u003Cunfinished ...>\n```\n\nThis strace delivers more than 300,000 lines about the different Gitaly processes running on this customer’s GitLab environment, making it challenging to decipher the flow of execution.\n{: .note.text-center}\n\n“In this case, we can use strace-parser to say, ‘Just give me all the files that were opened, and sort them by how long it took to open,’” says Will.\n\n```\n$ strace-parser trace.txt files --sort duration\n\nFiles Opened\n\n      pid      dur (ms)       timestamp            error         file name\n  -------    ----------    ---------------    ---------------    ---------\n    24670      5203.999    13:45:16.152985           -           /efs/gitlab/home/.gitconfig\n    24859      5296.580    13:45:23.367482           -           /efs/gitlab/home/.gitconfig\n    24584      5279.810    13:45:09.286019           -           /efs/gitlab/home/.gitconfig\n    24666      5276.975    13:45:16.079697           -           /efs/gitlab/home/.gitconfig\n    24667      5255.649    13:45:16.101009           -           /efs/gitlab/home/.gitconfig\n    14871      2594.364    13:45:18.762347           -           /efs/gitlab/home/.gitconfig\n    24885      2440.635    13:45:26.224189           -           /efs/gitlab/home/.gitconfig\n    24886      2432.980    13:45:26.231009           -           /efs/gitlab/home/.gitconfig\n    24656        55.873    13:45:15.916836        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/objects/info/alternates\n    24688        42.764    13:45:21.522789        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/objects/info/alternates\n     3709        39.631    13:45:07.816618           -           /efs/gitlab/home/.gitconfig\n    24583        37.959    13:45:09.218283           -           /efs/gitlab/home/.gitconfig\n```\n\nBy summarizing the data in this way, we see multiple files that took 2-5 seconds to open, which is several orders of magnitude slower than expected.\n{: .note.text-center}\n\n“If it’s a particularly busy server and we’re performing these actions 50 times a second, 100 times a second, that adds up really fast,” says Will. 
“Strace-Parser lets you drill down quickly, and say, ‘OK, this specific thing we’re doing is super slow.’”\n\n### Get a closer look at processes using strace-parser\n\nStrace-Parser can also be used to drill down into details of a process.\n\nThe previous output showed PID 24670 is one of the slower processes, so we use the parser to understand how this slow call impacted the performance of the process overall.\n\n```\n$ strace-parser trace.txt pid 24670\n\nPID 24670\n\n  271 syscalls, active time: 5303.438ms, user time: 34.662ms, total time: 5338.100ms\n  start time: 13:45:16.116671    end time: 13:45:21.454771\n\n  syscall                 count    total (ms)      max (ms)      avg (ms)      min (ms)    errors\n  -----------------    --------    ----------    ----------    ----------    ----------    --------\n  open                       29      5223.073      5203.999       180.106         0.031    ENOENT: 9\n  read                       25        46.303        28.747         1.852         0.031\n  access                     11         6.948         4.131         0.632         0.056    ENOENT: 3\n  lstat                       6         5.116         2.130         0.853         0.077    ENOENT: 4\n  mmap                       32         3.868         0.485         0.121         0.028\n  openat                      2         3.757         2.934         1.878         0.823\n  fstat                      28         3.395         0.272         0.121         0.033\n  munmap                     11         2.551         0.929         0.232         0.056\n  rt_sigaction               59         2.548         0.121         0.043         0.024\n  close                      22         2.375         0.279         0.108         0.032\n  mprotect                   14         0.927         0.174         0.066         0.032\n  execve                      1         0.621         0.621         0.621         0.621\n  brk                         6         0.595         0.210         0.099         0.046\n  stat                        8         0.388         0.082         0.048         0.027    ENOENT: 3\n  getdents                    4         0.361         0.138         0.090         0.044\n  rt_sigprocmask              3         0.141         0.059         0.047         0.040\n  write                       1         0.101         0.101         0.101         0.101\n  dup2                        3         0.090         0.032         0.030         0.026\n  arch_prctl                  1         0.077         0.077         0.077         0.077\n  getrlimit                   1         0.062         0.062         0.062         0.062\n  getcwd                      1         0.044         0.044         0.044         0.044\n  set_robust_list             1         0.035         0.035         0.035         0.035\n  set_tid_address             1         0.032         0.032         0.032         0.032\n  setpgid                     1         0.030         0.030         0.030         0.030\n  ---------------\n\n  Program Executed: /opt/gitlab/embedded/bin/git\n  Args: [\"--git-dir\" \"/nfs/gitlab/gitdata/repositories/group/project.git\" \"cat-file\" \"--batch-check\"]\n\n  Parent PID:  3563\n\n  Slowest file open times for PID 24670:\n\n    dur (ms)       timestamp            error         file name\n  ----------    ---------------    ---------------    ---------\n    5203.999    13:45:16.152985           -           /efs/gitlab/home/.gitconfig\n       5.420    13:45:16.143520           -           
/nfs/gitlab/gitdata/repositories/group/project.git/config\n       2.959    13:45:21.372776           -           /efs/gitlab/home/.gitconfig\n       2.934    13:45:21.401073           -           /nfs/gitlab/gitdata/repositories/group/project.git/refs/\n       2.736    13:45:21.417333        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/info/grafts\n       2.683    13:45:21.421558           -           /nfs/gitlab/gitdata/repositories/group/project.git/objects/b7/ef5eba3a425af1e2a9cf6f51cb87454b6e1ad1\n       2.430    13:45:21.407170        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/objects/info/alternates\n       0.992    13:45:21.420213        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/shallow\n       0.823    13:45:21.405535           -           /nfs/gitlab/gitdata/repositories/group/project.git/objects/pack\n       0.275    13:45:21.380382           -           /nfs/gitlab/gitdata/repositories/group/project.git/config\n```\n\nThe output shows the time this process spent working was dominated by the slow file open. This data points the Support team in the right direction for fixing the underlying issue.\n{: .note.text-center}\n\nStrace itself has the `-c` flag which provides a similar summary, but its utility is limited when multiple processes are traced as it cannot break out per-process statistics.  Strace-Parser breaks these down to the PID level, and can also include the details of parent and child processes on demand.\n\n“In this case Will has identified an interesting area for our customer and then very quickly anchored it in the fact that when we look at that one spot it was slow,” says Lee. “When we’re debugging, having this data available really helps us pinpoint the problem for our customers so we can give them answers.”\n\nThe typical GitLab deployment has many different processes and services running at a time, which can create dozens of different child processes, so there is a large surface area for potential errors or slowness to occur.\n\nStrace-Parser is an open source, generic tool that anyone can use to better understand their strace data.\n\n## 2. 
## 2. [JSON Stats](https://gitlab.com/gitlab-com/support/toolbox/json_stats)\n\nWill also built [JSON Stats](https://gitlab.com/gitlab-com/support/toolbox/json_stats), a script that pulls performance statistics for different logs from the customer’s GitLab environment and summarizes the results in an easy-to-interpret table.\n\n```\nMETHOD                             COUNT     RPS     PERC99     PERC95     MEDIAN         MAX        MIN          SCORE    % FAIL\nFetchRemote                         2542    0.17  962176.08  130154.88   36580.23  4988513.00    1940.45  2445851585.19      1.06\nFindAllTags                         5200    0.34   30000.37   11538.63    1941.84    30006.23     252.10   156001924.68      1.63\nFindCommit                          3506    0.23   20859.98   16622.78   10841.86    30001.59    2528.67    73135073.75      0.23\nFindAllRemoteBranches               1664    0.11   20432.93   12996.75    8606.60   405503.94    1430.84    34000396.10      0.00\nAddRemote                           2603    0.17   10001.03    8094.97     825.46    10007.46     228.13    26032673.70      3.00\nFindLocalBranches                   2535    0.16   10004.68   10002.90    9051.91    10036.16    1260.89    25361871.05     34.32\n```\n\nThis output shows that we’re calling the “FindLocalBranches” service 2500+ times, and it’s failing 34% of the time.\n{: .note.text-center}\n\nThe Support team can use JSON Stats to ground their findings in evidence when evaluating overall performance for a customer. It's the same concept as strace-parser: can we pivot the information so that it clearly becomes meaningful data?\n\n“It’s a quick way of extracting data that you can give to a customer. Instead of saying ‘Look, this failed once,’ we can say, ‘Look, this is failing a third of the time and that suggests there’s a problem with X,’” says Will.\n\nIn the sample output we see that JSON Stats is working with Gitaly logs, but the tool is nimble enough to work on the logs from all the heavy components of GitLab, including Rails, which runs the UI, and Sidekiq, which works on background tasks.\n
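You don't need the script to start pivoting log data this way. As a rough illustration of the idea (not the actual JSON Stats implementation), you can aggregate structured Gitaly logs with `jq`; the `grpc.method` and `grpc.code` field names and the log file name here are assumptions, so adjust them to match your environment:\n\n```\n$ jq -r 'select(.\"grpc.method\" != null) | .\"grpc.method\" + \" \" + (.\"grpc.code\" // \"OK\")' gitaly_current.log | sort | uniq -c | sort -rn | head\n```\n\nCounting method/status pairs like this is already enough to surface a service that fails a third of the time; JSON Stats goes further by computing rates, percentiles, and a combined score per method.\n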
“Some of our customers are very sophisticated and may have advanced monitoring that could give us this information. But we wanted to build a tool that would help us align and easily standardize on how we can get this performance information for customers that don’t have an advanced monitoring setup,” says Lee.\n\nWhile this specific tool isn't as helpful for people outside of the GitLab community, hopefully it helps to inspire others to consider how they are drawing conclusions, and how they can speed that process up.\n\n### Benchmarking with JSON Stats\n\nWill is building a future iteration of JSON Stats that will compare the performance of a customer’s GitLab instance with GitLab.com.\n\n![JSON benchmarking table](https://about.gitlab.com/images/blogimages/support-tools-update.png){: .shadow}\n\nBenchmarking the performance of GitLab.com (the first row) against the customer environment (second row), with the ratio between the two (third row). We can see that in the worst case, the customer’s 99th percentile FindCommit latency was almost eight times slower than it was on GitLab.com.\n{: .note.text-center}\n\n“Our vision here is to give accountability to our customers. We’re going to treat GitLab.com as the pinnacle experience for GitLab,” says Lee. “We want to use JSON Stats with benchmarking to help us understand how far away our customers are from GitLab.com.”\n\nLee and Will are still assessing how to set the target range for the customer’s instance of GitLab. But considering the wealth of resources allocated to GitLab.com, any self-managed customer that is performing within 5-10% of GitLab.com would be considered hugely successful.\n\n## 3. [GitLab SOS](https://gitlab.com/gitlab-com/support/toolbox/gitlabsos)\n\nWhen a customer encounters an issue but is unsure of what the problem is, they can run [GitLab SOS](https://gitlab.com/gitlab-com/support/toolbox/gitlabsos), created by support engineer [Cody West](/company/team/#codyww), to create a snapshot of different activities happening on their system. It's been so helpful in debugging GitLab that it's being added into our [Omnibus delivery](https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests/3430).\n\nBy capturing so much data about a moment in time during or shortly after encountering a problem, the support team is able to work asynchronously to troubleshoot on behalf of the customer.\n\n```\ncpuinfo              getenforce           iotop                netstat              opt                  sestatus             unicorn_stats\ndf_h                 gitlab_status        lscpu                netstat_i            pidstat              systemctl_unit_files uptime\ndmesg                gitlabsos.log        meminfo              nfsiostat            ps                   tainted              var\netc                  hostname             mount                nfsstat              sar_dev              ulimit               vmstat\nfree_m               iostat               mpstat               ntpq                 sar_tcp              uname\n```\n\nGitLab SOS works best if the script is run while an issue is occurring, or moments after, but even if the window of opportunity is missed you can still successfully gather information to diagnose the problem.\n{: .note.text-center}\n\n“If a customer is sharp, they may know what problems to look for already,” says Lee. “But if a customer is scared and they don’t know what to look for, then they can lean on a tool like GitLab SOS and learn from GitLab SOS. We even have some sharp customers that will generate the SOS output and begin to troubleshoot themselves because of the comprehensive overview it provides.”\n
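Running the tool is deliberately simple, so a customer under pressure can capture a snapshot quickly. A sketch of what that looks like, assuming you run it as root on the GitLab node (the invocation below is an assumption based on the repository layout, so check the project README for current usage):\n\n```\n$ git clone https://gitlab.com/gitlab-com/support/toolbox/gitlabsos.git\n$ cd gitlabsos\n$ sudo ./gitlabsos.rb\n```\n\nThe result is an archive of the files shown above, ready to attach to a support ticket for asynchronous troubleshooting.\n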
## These new tools drive data-driven decision-making in Support\n\nTools like strace-parser, JSON Stats, and GitLab SOS provide the Support team and GitLab customers with critical evidence about performance. By letting the data drive decision-making, the Support team is able to identify problems faster and quickly start debugging customer environments. Performance is a key feature of GitLab, and by filling our toolbox with data-driven solutions we can ensure greater [transparency](https://handbook.gitlab.com/handbook/values/#transparency) between GitLab and our customers.\n\nLearn more about debugging from a support engineering perspective in a GitLab Unfiltered video.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9W6QnpYewik\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nCover photo by [Diogo Nunes](https://unsplash.com/@dialex?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/tools?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[703,749,915],{"slug":6144,"featured":6,"template":678},"three-new-support-tools","content:en-us:blog:three-new-support-tools.yml","Three New Support Tools","en-us/blog/three-new-support-tools.yml","en-us/blog/three-new-support-tools",{"_path":6150,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6151,"content":6157,"config":6162,"_id":6164,"_type":16,"title":6165,"_source":17,"_file":6166,"_stem":6167,"_extension":20},"/en-us/blog/anomaly-detection-using-prometheus",{"title":6152,"description":6153,"ogTitle":6152,"ogDescription":6153,"noIndex":6,"ogImage":6154,"ogUrl":6155,"ogSiteName":692,"ogType":693,"canonicalUrls":6155,"schema":6156},"How to use Prometheus for anomaly detection in GitLab","Explore how Prometheus query language can be used to help you diagnose incidents, detect performance regressions, tackle abuse, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667819/Blog/Hero%20Images/anomaly-detection-cover.png","https://about.gitlab.com/blog/anomaly-detection-using-prometheus","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Prometheus for anomaly detection in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-23\",\n      }",{"title":6152,"description":6153,"authors":6158,"heroImage":6154,"date":6159,"body":6160,"category":14,"tags":6161},[3676],"2019-07-23","\n\nOne of the more basic functions of the Prometheus query language is real-time aggregation of [time series data](https://prometheus.io/docs/prometheus/latest/querying/basics/). [Andrew Newdigate](/company/team/#suprememoocow), a distinguished engineer on the GitLab infrastructure team, hypothesized that Prometheus query language can also be used to detect anomalies in time series data.\n\n[Andrew broke down the different ways Prometheus can be used](https://vimeo.com/341141334) for the attendees of [Monitorama 2019](https://monitorama.com/index.html). This blog post explains how anomaly detection works with Prometheus and includes the code snippets you’ll need to try it out for yourself on your own system.\n\n## Why is anomaly detection useful?\n\nThere are four key reasons why anomaly detection is important to GitLab:\n\n1. **Diagnosing incidents**: We can figure out which services are performing outside their normal bounds quickly and reduce the average time it takes to [detect an incident (MTTD)](/handbook/engineering/infrastructure/incident-management/), bringing about a faster resolution.\n
2. **Detecting application performance regressions**: For example, if an n + 1 regression is introduced and leads to one service calling another at a very high rate, we can quickly track the issue down and resolve it.\n3. **Identifying and resolving abuse**: GitLab offers free computing ([GitLab CI/CD](/topics/ci-cd/)) and hosting (GitLab Pages), and there is a small subset of users who might take advantage.\n4. **Security**: Anomaly detection is essential to spotting unusual trends in GitLab time series data.\n\nFor these reasons and many others, Andrew investigated whether it was possible to perform anomaly detection on GitLab time series data by simply using Prometheus queries and rules.\n\n## What level of aggregation is the correct one?\n\nFirst, time series data must be aggregated correctly. Andrew used a standard counter of `http_requests_total` as the data source for this demonstration, although many other metrics can be applied using the same techniques.\n\n```\nhttp_requests_total{\n job=\"apiserver\",\n method=\"GET\",\n controller=\"ProjectsController\",\n status_code=\"200\",\n environment=\"prod\"\n}\n```\n{: .language-ruby}\n\nThis example metric has **some extra dimensions**: `method`, `controller`, `status_code`, `environment`, as well as the dimensions that Prometheus adds, such as `instance` and `job`.\n\nNext, you must choose the correct level of aggregation for the data you are using. This is a bit of a Goldilocks problem – too much, too little, or just right – but it is essential for finding anomalies. By **aggregating the data too much**, it can be reduced to too few dimensions, creating two potential problems:\n\n1. You can miss genuine anomalies because the aggregation hides problems that are occurring within subsets of your data.\n2. If you do detect an anomaly, it's difficult to attribute it to a particular part of your system without more investigation into the anomaly.\n\nBut by **aggregating the data too little**, you might end up with a series of data with very small sample sizes which can lead to false positives and could mean flagging genuine data as outliers.\n\nJust right: Our experience has shown the **right level of aggregation is the service level**, so we include the job label and the environment label, but drop other dimensions. The aggregation used through the rest of the talk is the recording rule `job:http_requests:rate5m`: a request rate over a five-minute window, aggregated across everything except the `job` and `environment` labels.\n\n```\n- record: job:http_requests:rate5m\n  expr: >\n    sum without(instance, method, controller, status_code)\n    (rate(http_requests_total[5m]))\n# --> job:http_requests:rate5m{job=\"apiserver\", environment=\"prod\"}  21321\n# --> job:http_requests:rate5m{job=\"gitserver\", environment=\"prod\"}  2212\n# --> job:http_requests:rate5m{job=\"webserver\", environment=\"prod\"}  53091\n```\n{: .language-ruby}\n\n## Using z-score for anomaly detection\n\nSome of the primary principles of statistics can be applied to detecting anomalies with Prometheus.\n\nIf we know the average value and [standard deviation (σ)](https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/standard-deviation/) of a Prometheus series, we can use any sample in the series to calculate the z-score. The z-score is measured in the number of standard deviations from the mean. 
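In symbols, a sample x from a series with mean μ and standard deviation σ has the z-score z = (x - μ) / σ. 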
So a z-score of 0 means the sample is identical to the mean of a normally distributed data set, while a z-score of 1 is 1.0 σ from the mean, etc.\n\nAssuming the underlying data has a normal distribution, 99.7% of the samples should have a z-score between -3 and +3. The further a z-score is from zero, the less likely such a value is to occur. We apply this property to detecting anomalies in the Prometheus series.\n\n1. Calculate the average and standard deviation for the metric using data with a large sample size. For this example, we use one week’s worth of data. If we assume we're evaluating the recording rule once a minute, over a one-week period we'll have just over 10,000 samples.\n\n```\n# Long-term average value for the series\n- record: job:http_requests:rate5m:avg_over_time_1w\n  expr: avg_over_time(job:http_requests:rate5m[1w])\n\n# Long-term standard deviation for the series\n- record: job:http_requests:rate5m:stddev_over_time_1w\n  expr: stddev_over_time(job:http_requests:rate5m[1w])\n```\n{: .language-ruby}\n\n2. We can calculate the z-score for the Prometheus query once we have the average and standard deviation for the aggregation.\n\n```\n# Z-Score for aggregation\n(\njob:http_requests:rate5m -\njob:http_requests:rate5m:avg_over_time_1w\n) / job:http_requests:rate5m:stddev_over_time_1w\n```\n{: .language-ruby}\n\nBased on the statistical principles of normal distributions, **we can assume that any value that falls outside of the range of roughly +3 to -3 is an anomaly**. We can build an alert around this principle. For example, we can get an alert when our aggregation is out of this range for more than five minutes.\n
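A minimal sketch of such an alerting rule (this one is not from the talk; it simply reuses the recording rules defined above and mirrors the seasonal alert shown later in this post):\n\n```\n- alert: RequestRateZScoreOutsideNormalRange\n  expr: >\n   abs(\n     (\n       job:http_requests:rate5m - job:http_requests:rate5m:avg_over_time_1w\n     ) / job:http_requests:rate5m:stddev_over_time_1w\n   ) > 3\n  for: 5m\n  labels:\n    severity: warning\n```\n{: .language-yaml}\n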
![Graph showing RPS on GitLab.com over 48 hours](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image1.png){: .shadow.medium.center}\n\nGitLab.com Pages service RPS over 48 hours, with ±3 z-score region in green\n{: .note.text-center}\n\nZ-scores are a bit awkward to interpret on a graph because they don’t have a unit of measurement. But anomalies on this chart are easy to detect. Anything that appears outside of the green area (which denotes z-scores that fall within a range of +3 to -3) is an anomaly.\n\n### What if you don’t have a normal distribution?\n\n**But wait**: We make a big leap by assuming that our underlying aggregation has a normal distribution. If we calculate the z-score with data that isn’t normally distributed, our results will be incorrect.\n\nThere are numerous statistical techniques for testing your data for a normal distribution, but a simple option is to test that the minimum and maximum z-scores of your underlying data fall within a range of about **+4 to -4**.\n\n```\n(\n max_over_time(job:http_requests:rate5m[1w]) - avg_over_time(job:http_requests:rate5m[1w])\n) / stddev_over_time(job:http_requests:rate5m[1w])\n# --> {job=\"apiserver\", environment=\"prod\"}  4.01\n# --> {job=\"gitserver\", environment=\"prod\"}  3.96\n# --> {job=\"webserver\", environment=\"prod\"}  2.96\n\n(\n min_over_time(job:http_requests:rate5m[1w]) - avg_over_time(job:http_requests:rate5m[1w])\n) / stddev_over_time(job:http_requests:rate5m[1w])\n# --> {job=\"apiserver\", environment=\"prod\"}  -3.8\n# --> {job=\"gitserver\", environment=\"prod\"}  -4.1\n# --> {job=\"webserver\", environment=\"prod\"}  -3.2\n```\n{: .language-ruby}\n\nTwo Prometheus queries testing the minimum and maximum z-scores.\n{: .note.text-center}\n\nIf your results return with a range of +20 to -20, the tail is too long and your results will be skewed. Remember too that this needs to be run on an aggregated series, not an unaggregated one. Metrics that probably don’t have normal distributions include things like error rates, latencies, queue lengths, etc., but many of these metrics will tend to work better with fixed thresholds for alerting anyway.\n\n## Detecting anomalies using seasonality\n\nWhile calculating z-scores works well with normal distributions of time series data, there is a second method that can yield _even more accurate_ anomaly detection results. **Seasonality** is a characteristic of a time series metric in which the metric experiences regular and predictable changes that recur every cycle.\n\n![Graph showing Gitaly RPS, Mon-Sun over four weeks](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image2.png){: .shadow.medium.center}\n\nGitaly requests per second (RPS), Monday-Sunday, over four consecutive weeks\n{: .note.text-center}\n\nThis graph illustrates the RPS (requests per second) rates for Gitaly over seven days, Monday through Sunday, over four consecutive weeks. The seven-day range is referred to as the “offset”: the length of the repeating pattern we measure against.\n\nEach week on the graph is in a different color. The seasonality in the data shows in the consistency of the trends on the graph – every Monday morning, we see the same rise in RPS rates, and on Friday evenings, we see the RPS rates drop off, week after week.\n\nBy leveraging the seasonality in our time series data we can create more accurate predictions, which will lead to better anomaly detection.\n\n### How do we leverage seasonality?\n\nCalculating seasonality with Prometheus required that we iterate on a few different statistical principles.\n\nIn the first iteration, we calculate a prediction by adding the growth trend we’ve seen over a one-week period to the value from the previous week. We calculate the growth trend by subtracting the rolling one-week average for last week from the rolling one-week average for now.\n\n```\n- record: job:http_requests:rate5m_prediction\n  expr: >\n    job:http_requests:rate5m offset 1w                     # Value from last period\n    + job:http_requests:rate5m:avg_over_time_1w            # One-week growth trend\n    - job:http_requests:rate5m:avg_over_time_1w offset 1w\n```\n{: .language-yaml}\n\nThe first iteration is a bit narrow; we’re using a five-minute window from this week and the previous week to derive our predictions.\n\nIn the second iteration, we expand our scope by taking the average of a four-hour period for the previous week and comparing it to the current week. 
So, if we’re trying to predict the value of a metric at 8am on a Monday morning, instead of using the same five-minute window from one week prior, we use the average value for the metric from 6am until 10am on the previous Monday morning.\n\n```\n- record: job:http_requests:rate5m_prediction\n  expr: >\n    avg_over_time(job:http_requests:rate5m[4h] offset 166h) # Rounded value from last period\n    + job:http_requests:rate5m:avg_over_time_1w             # Add 1w growth trend\n    - job:http_requests:rate5m:avg_over_time_1w offset 1w\n```\n{: .language-yaml}\n\nWe use 166 hours in the query instead of one week because we want a four-hour window centered on the current time of day, so we need the offset to be two hours short of a full week.\n\n![Comparing the real Gitaly RPS with our prediction](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image3.png){: .shadow.medium.center}\n\nGitaly service RPS (yellow) vs prediction (blue), over two weeks.\n{: .note.text-center}\n\nA comparison of the actual Gitaly RPS (yellow) with our prediction (blue) indicates that our calculations were fairly accurate. However, this method has a flaw.\n\nGitLab usage was lower than the typical Wednesday because May 1 was International Labor Day, a holiday celebrated in many different countries. Because our growth rate is informed by the previous week’s usage, our prediction for the next week, Wednesday, May 8, was for a lower RPS than it would have been had Wednesday, May 1 not been a holiday.\n\nThis can be fixed by making three predictions for Wednesday, May 8, based on the three consecutive Wednesdays before it: the previous Wednesday, the Wednesday before that, and the Wednesday before that. The query stays the same, but the offset is adjusted for each prediction.\n\n![A graph showing three predictions for three Wednesdays vs. actual Gitaly RPS](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image4.png){: .shadow.medium.center}\n\nThree predictions for three Wednesdays vs actual Gitaly RPS, Wednesday, May 8 (one week following International Labor Day)\n{: .note.text-center}\n\nOn the graph we’ve plotted Wednesday, May 8 and three predictions for the three consecutive weeks before May 8. We can see that two of the predictions are good, but the May 1 prediction is still far off base.\n\nAlso, we don’t want three predictions, we want **one prediction**. Taking the average is not an option, because it will be diluted by our skewed May 1 RPS data. Instead, we want to calculate the median. 
Prometheus does not have a median query, but we can use a quantile aggregation in lieu of the median.\n\nThe one problem with this approach is that we're trying to include three series in an aggregation, and those three series are actually all the same series over three weeks. In other words, they all have the same labels, so connecting them is tricky. To avoid confusion, we create a label called `offset` and use the `label_replace` function to add an offset label to each of the three weeks. Next, in the quantile aggregation, we strip that label off, and that gives us the middle value out of the three.\n\n```\n- record: job:http_requests:rate5m_prediction\n  expr: >\n   quantile(0.5,\n     label_replace(\n       avg_over_time(job:http_requests:rate5m[4h] offset 166h)\n       + job:http_requests:rate5m:avg_over_time_1w - job:http_requests:rate5m:avg_over_time_1w offset 1w\n       , \"offset\", \"1w\", \"\", \"\")\n     or\n     label_replace(\n       avg_over_time(job:http_requests:rate5m[4h] offset 334h)\n       + job:http_requests:rate5m:avg_over_time_1w - job:http_requests:rate5m:avg_over_time_1w offset 2w\n       , \"offset\", \"2w\", \"\", \"\")\n     or\n     label_replace(\n       avg_over_time(job:http_requests:rate5m[4h] offset 502h)\n       + job:http_requests:rate5m:avg_over_time_1w - job:http_requests:rate5m:avg_over_time_1w offset 3w\n       , \"offset\", \"3w\", \"\", \"\")\n   )\n   without (offset)\n```\n{: .language-yaml}\n\nNow, our prediction deriving the median value from the series of three aggregations is much more accurate.\n\n![Graph showing median predictions vs. actual Gitaly RPS on Weds May 8](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image5.png){: .shadow.medium.center}\n\nMedian predictions vs actual Gitaly RPS, Wednesday, May 8 (one week following International Labor Day)\n{: .note.text-center}\n\n### How do we know our prediction is truly accurate?\n\nTo test the accuracy of our prediction, we can return to the z-score. We can use the z-score to measure the sample's distance from its prediction in standard deviations. The more standard deviations away from our prediction we are, the greater the likelihood is that a particular value is an outlier.\n
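Concretely, this is the same z-score expression as before, with the seasonal prediction standing in for the long-term average (the identical expression appears inside the alerting rule below):\n\n```\n(\n job:http_requests:rate5m - job:http_requests:rate5m_prediction\n) / job:http_requests:rate5m:stddev_over_time_1w\n```\n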
![Predicted normal range +1.5σ/-1.5σ](https://about.gitlab.com/images/blogimages/prometheus_anomaly/image6.png){: .shadow.medium.center}\n\nPredicted normal range ± 1.5σ for Gitaly Service\n{: .note.text-center}\n\nWe can update our Grafana chart to use the seasonal prediction rather than the weekly rolling average value. The range of normality for a certain time of day is shaded in green. Anything that falls outside of the shaded green area is considered an outlier. In this case, the outlier was on Sunday afternoon when our cloud provider encountered some network issues.\n\nUsing boundaries of ±2σ on either side of our prediction is a pretty good measure for determining an outlier with seasonal predictions.\n\n## How to set up alerting using Prometheus\n\nIf you want to set up alerts for anomaly events, you can apply a pretty straightforward rule to Prometheus that checks whether the z-score of the metric falls outside the range of **-2 to +2**.\n\n```\n- alert: RequestRateOutsideNormalRange\n  expr: >\n   abs(\n     (\n       job:http_requests:rate5m - job:http_requests:rate5m_prediction\n     ) / job:http_requests:rate5m:stddev_over_time_1w\n   ) > 2\n  for: 10m\n  labels:\n    severity: warning\n  annotations:\n    summary: Requests for job {{ $labels.job }} are outside of expected operating parameters\n```\n{: .language-yaml}\n\nAt GitLab, we use a custom routing rule that pings Slack when any anomalies are detected, but doesn’t page our on-call support staff.\n
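The routing itself happens in Alertmanager rather than in Prometheus. A minimal sketch of the idea (the receiver names and Slack channel are hypothetical, not our production configuration):\n\n```\nroute:\n  receiver: on-call-pager\n  routes:\n    # Anomaly warnings notify Slack but never page the on-call\n    - match:\n        severity: warning\n      receiver: slack-anomalies\nreceivers:\n  - name: on-call-pager\n  - name: slack-anomalies\n    slack_configs:\n      - channel: '#anomaly-alerts'\n```\n{: .language-yaml}\n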
## The takeaway\n\n1. Prometheus can be used for some types of anomaly detection\n2. The right level of data aggregation is the key to anomaly detection\n3. Z-scoring is an effective method, if your data has a normal distribution\n4. Seasonal metrics can provide great results for anomaly detection\n\nWatch Andrew’s full presentation from [Monitorama 2019](https://monitorama.com/index.html). If you have questions for Andrew, reach him on Slack at #talk-andrew-newdigate. You can also read more about [why you need Prometheus](/blog/why-all-organizations-need-prometheus/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/341141334?portrait=0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n",[915],{"slug":6163,"featured":6,"template":678},"anomaly-detection-using-prometheus","content:en-us:blog:anomaly-detection-using-prometheus.yml","Anomaly Detection Using Prometheus","en-us/blog/anomaly-detection-using-prometheus.yml","en-us/blog/anomaly-detection-using-prometheus",{"_path":6169,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6170,"content":6176,"config":6181,"_id":6183,"_type":16,"title":6184,"_source":17,"_file":6185,"_stem":6186,"_extension":20},"/en-us/blog/elasticsearch-update",{"title":6171,"description":6172,"ogTitle":6171,"ogDescription":6172,"noIndex":6,"ogImage":6173,"ogUrl":6174,"ogSiteName":692,"ogType":693,"canonicalUrls":6174,"schema":6175},"Update: The challenge of enabling Elasticsearch on GitLab.com","How we got started with enabling Elasticsearch on the largest GitLab instance, GitLab.com.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666832/Blog/Hero%20Images/enable-global-search-elasticsearch.jpg","https://about.gitlab.com/blog/elasticsearch-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update: The challenge of enabling Elasticsearch on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nick Thomas\"}],\n        \"datePublished\": \"2019-07-16\",\n      }",{"title":6171,"description":6172,"authors":6177,"heroImage":6173,"date":6178,"body":6179,"category":14,"tags":6180},[5851],"2019-07-16","\nBack in March, [Mario](/company/team/#mdelaossa) shared some of the [lessons we'd learned from our last attempt to enable\nElasticsearch](/blog/enabling-global-search-elasticsearch-gitlab-com/) on GitLab.com, an integration that would\nunlock both [Advanced Global Search](https://docs.gitlab.com/ee/user/search/advanced_search.html)\nand [Advanced Syntax Search](https://docs.gitlab.com/ee/user/search/advanced_search.html). Since then, we've been working hard to address problems with the integration and prepare for [another attempt](https://gitlab.com/groups/gitlab-org/-/epics/853).\n\n## Selective indexing\n\nAt the heart of our dilemma was a classic \"chicken and egg\" problem. We needed\nto gather more information about [Elasticsearch](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html) to make improvements to the total\nindex size, but without an active deployment, that information was very hard to\ngather. Customer feedback and small-scale testing in development environments\nall help, but [dogfooding](https://handbook.gitlab.com/handbook/values/#dogfooding)\nthe integration is the best way to get the information we require.\n\nTo resolve this, we prioritized changes to enable Elasticsearch integration on\nGitLab.com. Since the index size was a hard problem, this meant some kind of\nselective indexing was necessary, so we've added\n[per-project and per-group controls](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html).\n\nOn June 24, 2019, we enabled the integration for the `gitlab-org` group on\nGitLab.com. Now, any searches at the group or project level will make use of the\nElasticsearch index, and the advanced features the integration unlocks will be available.\nWe figured, why not [give it a try](https://gitlab.com/search?search=gitlab-org+%28gitaly+%7C+ee%29&group_id=9970)?\n\nThe total index size for this group – which includes about 500 projects – is around 2.2\nmillion documents and 15GB of data, which is really easy to manage from the point of view of\nElasticsearch administration. The indexing operation itself didn't [go as smoothly as we hoped](https://gitlab.com/gitlab-com/gl-infra/production/issues/800), however!\n\n## Bug fixes\n\nAnother benefit of enabling selective Elasticsearch indexing on GitLab.com\nis that it forced us to gain confidence that the feature is performant,\nthat it won't threaten the overall stability of GitLab.com, and that it is\nsubstantially bug-free. So we went through a [Production Readiness Review](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/64)\nbefore enabling it. The review uncovered a number of pre-existing bugs and new regressions, which have all been fixed in the\n[12.0 release](/releases/2019/06/22/gitlab-12-0-released/). 
Some of the bugs included:\n\n* [Elasticsearch was sometimes used for searches, even when disabled](https://gitlab.com/gitlab-org/gitlab-ee/issues/11795)\n* [Performance regression indexing database content](https://gitlab.com/gitlab-org/gitlab-ee/issues/11595)\n* [Regression searching for some projects at group level](https://gitlab.com/gitlab-org/gitlab-ee/issues/12091)\n* [Regression visiting page 2 of search results](https://gitlab.com/gitlab-org/gitlab-ee/issues/12254)\n* [Wiki indexing still relied on a shared filesystem](https://gitlab.com/gitlab-org/gitlab-ee/issues/11269)\n* [Searching snippets with Elasticsearch enabled still queries the database, not Elasticsearch](https://gitlab.com/gitlab-org/gitlab-ee/issues/10548)\n\nWe still can't claim to be bug-free, of course, but the picture is a lot rosier than if we'd attempted to roll out this feature without first using it ourselves.\n\nWe'd tested the new indexing code on our staging environment, but this was last\nrefreshed more than a year ago, and was significantly smaller than the group on\nGitLab.com, containing around 150 projects. As a result, some bugs and\nscalability issues were uncovered for the first time in production. We're\naddressing them with high priority in the 12.1 and 12.2 releases. The scaling issues include:\n\n* [Project imports unconditionally enqueue an ElasticCommitIndexerWorker](https://gitlab.com/gitlab-org/gitlab-ee/issues/12362)\n* [Allow maximum bulk request size to be configured](https://gitlab.com/gitlab-org/gitlab-ee/issues/12375)\n* [Intelligently retry bulk-insert failures when indexing](https://gitlab.com/gitlab-org/gitlab-ee/issues/12372)\n* [Note bulk indexing often fails due to statement timeout](https://gitlab.com/gitlab-org/gitlab-ee/issues/12402)\n* [Cannot index large snippets](https://gitlab.com/gitlab-org/gitlab-ee/issues/12111)\n* [Removing documents from the index can fail with a conflict error](https://gitlab.com/gitlab-org/gitlab-ee/issues/12114)\n\nOnce these issues are addressed, indexing at scale should be quick, easy, and\nreliable. Indexing at scale is invaluable from the point of view of an engineer trying out\nchanges to reduce total index size.\n\n## Administration\n\nAnother area for improvement is administering the indexing process\nitself. Although GitLab automatically creates, updates, and removes documents\nfrom the index when changes are made, backfilling existing data required manual\nintervention, running a set of complicated (and slow) rake tasks to get the\npre-existing data into the Elasticsearch index. Unless these instructions were\nfollowed correctly, search results would be incomplete. There was also no way\nto configure a number of important parameters for the indexes created by GitLab.\n\nWhen using the selective indexing feature, GitLab now automatically enqueues\n\"backfill\" tasks for groups and projects as they are added, and removes the\nrelevant records from the index when they are supposed to be removed. We've also made it possible to\n[configure the number of shards and replicas](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html)\nfor the Elasticsearch index directly in the admin panel, so when GitLab creates\nthe index for you, there's no need to manually change the parameters afterwards.\n\nPersonal snippets are the one type of document that isn't yet covered by\nselective indexing. 
To ensure they show up in search results, you'll still\nneed to run the [`gitlab:elastic:index_snippets`](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html) rake task [for now](https://gitlab.com/gitlab-org/gitlab-ee/issues/12333).\n\nThere are also improvements if you're not using selective indexing – the admin\narea now has a \"Start indexing\" button. Right now, this only makes sense if\nstarting from an empty index, and doesn't index personal snippets either, but\nwe're hopeful we can [remove the rake tasks entirely](https://gitlab.com/gitlab-org/gitlab-ee/issues/11206)\nin the future.\n\n## What next?\n\nWe're really happy to have Elasticsearch enabled for the `gitlab-org` group, but\nthe eventual goal is to have it [enabled on all of GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/153).\nWe'll be rolling it out to more groups in the future.\n\nTo get there, we'll need to continue to improve the\n[administration experience](https://gitlab.com/groups/gitlab-org/-/epics/428) using Elasticsearch.\nFor instance, it's still difficult to see the indexing status of a group or\nproject at a glance, a function that would be really useful for our support team to answer\nqueries like \"Why isn't this search term returning the expected results?\"\n\n### Managing the Elasticsearch schema is also a challenge\n\nCurrently, we take the easy route of reindexing everything if we need to change some aspect of the\nschema, which doesn't scale well as the index gets larger. [Some\nwork on this is ongoing](https://gitlab.com/gitlab-org/gitlab-ee/issues/328),\nand the eventual goal is for GitLab to automatically manage changes to the\nElasticsearch index in the same way it does for the database.\n\n[Reducing the index size](https://gitlab.com/groups/gitlab-org/-/epics/429) is\nstill a huge priority, and we hope to make progress on this now that we\nhave an Elasticsearch deployment to iterate against.\n\n### We'd also like to improve the quality of search results\n\nFor example, we have\nreports of code search [failing to find certain identifiers](https://gitlab.com/gitlab-org/gitlab-ee/issues/10693) and we'd like to use the Elasticsearch index in more contexts, such as for\n[filtered search](https://gitlab.com/gitlab-org/gitlab-ee/issues/12082).\n\nThe Elasticsearch integration is progressing. Finally, responsibility for it has been passed from\nthe [Plan stage](/handbook/product/categories/#plan-stage)\nto the [Editor group of the Create stage](/handbook/product/categories/#editor-group).\nI hope you'll join Mario and me in wishing [Kai](/company/team/#phikai),\n[Darva](/company/team/#DarvaSatcher), and the rest of the team the best of luck in tackling the remaining challenges for Elasticsearch. 
An up-to-date overview of their plans can always be found on\nthe [search strategy](/direction/global-search/) page.\n\nPhoto by [Benjamin Elliott](https://unsplash.com/photos/vc9u77c0LO4) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[749,232,915],{"slug":6182,"featured":6,"template":678},"elasticsearch-update","content:en-us:blog:elasticsearch-update.yml","Elasticsearch Update","en-us/blog/elasticsearch-update.yml","en-us/blog/elasticsearch-update",{"_path":6188,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6189,"content":6195,"config":6201,"_id":6203,"_type":16,"title":6204,"_source":17,"_file":6205,"_stem":6206,"_extension":20},"/en-us/blog/building-a-ux-research-insights-repository",{"title":6190,"description":6191,"ogTitle":6190,"ogDescription":6191,"noIndex":6,"ogImage":6192,"ogUrl":6193,"ogSiteName":692,"ogType":693,"canonicalUrls":6193,"schema":6194},"Why we built a UX Research Insights repository","One of the biggest challenges faced by UX researchers is organizing and storing user research effectively, so that anyone can find and use insights.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678561/Blog/Hero%20Images/open-course-environment.jpg","https://about.gitlab.com/blog/building-a-ux-research-insights-repository","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we built a UX Research Insights repository\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah O’Donnell\"}],\n        \"datePublished\": \"2019-07-10\",\n      }",{"title":6190,"description":6191,"authors":6196,"heroImage":6192,"date":6198,"body":6199,"category":14,"tags":6200},[6197],"Sarah O’Donnell","2019-07-10","\nI joined GitLab around two and a half years ago. At the time, I was GitLab’s first and only UX researcher. I’d hunt out issues in the [GitLab CE](https://gitlab.com/gitlab-org/gitlab-ce/) project where I felt I could add value. Usually, a member of the Product or UX/Product Design team would open the issue, which was often sparked by user feedback from social media, during a customer meeting, or even in response to a prior issue. It was my responsibility to help the teams determine how we could best address user needs, motivations, and pain points, or if the request was an edge case. I documented my research questions and insights in the associated issues living in the CE project. However, this approach had some problems:\n\nBack then, the formatting options available for issues were in their infancy. It was difficult to structure and share data in a clear and concise way. Like most good researchers, I’d always learn more than I intended to during a study. I created new issues for the insights we weren’t previously aware of and didn’t have documented. Epics didn’t exist yet, so there was no way to collectively group issues from the same study. I could label issues, but we discovered (maybe ironically) with UX research that GitLab’s search functionality needed improvements. The GitLab CE project contains more than 50,000 issues, so trying to find and action an insight was like trying to find a needle in a haystack.\n\nEnter the [UX Research repository](https://gitlab.com/gitlab-org/ux-research) and research reports. As the Product and UX/Product Design teams grew, so did the demand for UX research. 
Product managers and UX/product designers needed greater visibility into what I was working on so they had a sense of my availability for projects, and I needed a way to manage incoming research requests. We had success by creating a dedicated repository for UX Research requests and then using checklists within issues to track my progress against each request. However, I still had my original problem of needing to store and disseminate research insights. I resorted to Google Docs and began producing reports of my insights. This worked well for a little while, but then the cracks started to show.\n\n## The problem with reports\n\n### They are not searchable\nWhenever anybody asked me if I had witnessed users experiencing a particular problem, I’d rack my brain trying to work out which research report might contain the answer. I’d sift through multiple reports, scanning everything I had previously written. The situation became worse when we added new UX researchers to the team who began producing their own reports. I had a vague idea of what was in my own reports, but I didn't know where to start with reports produced by other UX researchers.\n\n### They create research silos\nAs I searched through dozens of reports, I realized research findings were inaccessible. UX researchers were spending a large part of their days searching through past insights, when their time would be better spent speaking with users and uncovering new insights. Everybody should be able to find research swiftly and easily without needing a researcher to find it for them.\n\n### They are not actionable\nAt GitLab, we use issues to solve problems, develop ideas, and collaborate. [One of our values is iteration](\n/handbook/values/#iteration): We do the smallest thing possible and get it out as quickly as possible. UX research reports were not small; they often contained many insights. Just one insight could lead to multiple, iterative changes to the user interface. We ended up copying parts of our reports into issues, which felt like a duplication of effort.\n\n### They quickly become outdated\nOur research reports directly addressed the research questions formed with the Product and UX/Product Design teams and were extremely focused on a topic or feature. GitLab is a rapidly growing product; consequently, our research reports became outdated very quickly. Reports that felt ‘old’ or ‘stale’ were rarely revisited, but the reports contained insights that could be triangulated with more recent research. Reports didn’t provide an easy way to access this important data in the future.\n\n## Finding a solution\nI wanted to confirm whether people outside of the UX Research team also felt these problems. I set up 1:1 interviews with every product manager at GitLab. In these interviews, I learned reports weren’t working for our product managers either. If something requires their attention, they want it in an issue.\n\nI read (lots) of articles on [Atomic Research](https://medium.com/@tsharon/foundations-of-atomic-research-a937d5da5fbb) and realized we could use a similar approach for managing our insights. Better yet, I felt we could [dogfood](https://handbook.gitlab.com/handbook/values/#dogfooding) our approach.\n\n## Introducing the UXR Insights repository\n\nThe [UXR Insights repository](https://gitlab.com/gitlab-org/uxr_insights) is the new single source of truth for all user insights discovered by GitLab’s UX researchers and UX/product designers. 
Instead of reports, we use issues to document key findings from research studies.\n\nYou may be wondering why we reverted to issues, given the problems of a couple of years ago. GitLab’s [issue functionality](https://docs.gitlab.com/ee/user/project/issues/#issues) has improved immensely since then. There’s now a range of formatting options for issues, and our [search functionality](https://docs.gitlab.com/ee/user/search/#issues-and-merge-requests-per-project) includes the ability to search by labels.\n\nWe use labels to tag and organize insights. This allows anyone to quickly search and filter through issues to find the insights they need. Unlike in a report, insights are continually added. This means that you’ll receive a dynamic list of results when searching through the repository.\n\nWe use [epics](https://docs.gitlab.com/ee/user/group/epics/) and GitLab’s [related issues functionality](https://docs.gitlab.com/ee/user/project/issues/related_issues.html) to track issues from the same research study. The epic description usually contains our research methodology and any background information about users.\n\nOpen issues and epics indicate that the research study is still in progress and the UX researcher and/or UX/product designer is still adding insights to the repository. Closed issues and epics indicate that the research study is finished.\n\nEach insight is supported with evidence, typically in the form of a video clip or statistical data. Unlike the atomic research approach, some lightweight research synthesis takes place before insights are added to the repository (which is why we also call them ‘insights’ rather than ‘nuggets’ or ‘observations’). While every issue within the repository contains a single insight on a particular topic, the insight can relate to multiple users.\n\nFor example: We’re conducting some usability testing. Four out of the five users we tested with experienced the same problem. Rather than open four separate issues, we’ll create one issue, but we’ll include four supporting pieces of evidence (four video clips – one for each user) in the single issue.\n\nWe’re also experimenting with using the UXR Insights repository for quantitative forms of research, such as surveys. Each survey insight focuses on a key theme/question (for example: mobile usage) and is supported by data derived from the survey results.\n\n## Challenges and what the future holds\n\nOur biggest challenge was transferring all our research reports into the [UXR Insights repository](https://gitlab.com/gitlab-org/uxr_insights). The team has collected a lot of data over the years, so it was a mammoth task. We never envisioned moving our research to an insights repository when we originally wrote and formatted our reports. Retrospectively adding insights means we’ve had to make some compromises; we haven’t always been able to use the insight structure that we want to use going forward.\n\nA second challenge is training new and existing members of the UX department how to use the insights repository. We believe [everyone can contribute](\n/company/mission/#mission). The UX Research team are not gatekeepers to research. We want everyone to be able to conduct research effectively and to be able to accurately add their findings to the insights repository. 
As a starting point, we’ve added [templates](https://docs.gitlab.com/ee/user/project/description_templates.html) to the repository that guide users through the process of adding insights.\n\nWe decided to keep our insights separate from the GitLab CE and EE projects, which is where our Product and UX/Product Design teams typically work. Not all of our insights are necessarily actionable right away – sometimes more evidence is required (especially with the gems we unintentionally discover during our studies). We needed a place where we could store and share these insights, while continuing to discuss and research them. The UXR Insights repository is within the [GitLab.org group](https://gitlab.com/gitlab-org), meaning that product managers who create [issue boards at a group level](https://docs.gitlab.com/ee/user/project/issue_board.html#group-issue-boards) to manage their workflow can simply add an insight to their board when they are ready to act on it. Or they can [cross-link](https://docs.gitlab.com/ee/user/project/issues/crosslinking_issues.html#crosslinking-issues) to the insight in a supporting issue or epic.\n\nThis is our first iteration of the UXR Insights repository. We expect improvements will be required along the way, and the UX team is planning to review how the repository is working after 90 days. However, early signs indicate that (unsurprisingly) no UX researchers are missing writing reports!\n\nCover image by [chuttersnap](https://unsplash.com/photos/Y94yKEyNjVw) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1144,959],{"slug":6202,"featured":6,"template":678},"building-a-ux-research-insights-repository","content:en-us:blog:building-a-ux-research-insights-repository.yml","Building A Ux Research Insights Repository","en-us/blog/building-a-ux-research-insights-repository.yml","en-us/blog/building-a-ux-research-insights-repository",{"_path":6208,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6209,"content":6215,"config":6221,"_id":6223,"_type":16,"title":6224,"_source":17,"_file":6225,"_stem":6226,"_extension":20},"/en-us/blog/git-performance-on-nfs",{"title":6210,"description":6211,"ogTitle":6210,"ogDescription":6211,"noIndex":6,"ogImage":6212,"ogUrl":6213,"ogSiteName":692,"ogType":693,"canonicalUrls":6213,"schema":6214},"What we're doing to fix Gitaly NFS performance regressions","How we're improving our Git IO patterns to fix performance regressions when running Gitaly on NFS.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670065/Blog/Hero%20Images/git-performance-nfs.jpg","https://about.gitlab.com/blog/git-performance-on-nfs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What we're doing to fix Gitaly NFS performance regressions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"James Ramsay\"},{\"@type\":\"Person\",\"name\":\"Zeger-Jan van de Weg\"}],\n        \"datePublished\": \"2019-07-08\",\n      }",{"title":6210,"description":6211,"authors":6216,"heroImage":6212,"date":6218,"body":6219,"category":14,"tags":6220},[6217,4476],"James Ramsay","2019-07-08","\nFrom the start, Gitaly, GitLab's service that is the interface to our Git data,\nfocused on removing the dependency on NFS. We achieved this task at the end\nof summer 2018, when the [NFS drives were unmounted on GitLab.com][gitaly-nfs-blog].\nThe migration was geared towards improving the availability and correctness of\nGit data at GitLab; that is, fixing bugs. 
To an extent, performance\nwas an afterthought. Rewriting most of the RPCs in Go had side effects\nthat positively improved performance, but conversely there were also occasions\nwhere performance wasn't addressed immediately, but rather added to the backlog\nfor the next iteration.\n\nSince releasing Gitaly 1.0, and updating GitLab to use Gitaly instead of Rugged\nfor all Git operations, we have observed severe performance regressions for\nlarge GitLab instances when using NFS. To address these performance problems in\nGitLab 11.9, we added [feature flags][feature-flag-docs] to enable\nRugged implementations that improve performance for affected GitLab instances.\nThese have been backported to 11.5-11.8.\n\n### So what's the problem?\n\nWhile the migration was under way, there were noticeable performance regressions.\nIn most cases, these were so-called N + 1 access patterns. One example was the\n[pipeline index view](https://gitlab.com/gitlab-org/gitlab-ce/pipelines/), where\neach pipeline runs on a commit. On that page, GitLab used to call the `FindCommit`\nRPC for each pipeline. To improve performance, a new RPC was added:\n`ListCommitsByOid`. With this RPC, the object IDs for the commits are collected\nfirst, then one request is made to Gitaly to get all the commits and return them to\ncontinue rendering the view.\n\nThis approach was, and still is, successful. However, detecting these N + 1\nqueries is hard. When GitLab is run for development as part of the GDK, or\nduring testing, a special N + 1 detector will raise an error if an N + 1\noccurred. This approach has several shortcomings. For one, most tests will only\ntest the behavior of one entity, not 20. This reduces the likelihood of the\nerror being raised. There is also a way to silence N + 1 errors, for example:\n\n```ruby\nproject = Project.find(1)\n\nGitalyClient.allow_n_plus_1 do\n  project.pipelines.last(20).each do |pipeline|\n    project.repository.find_commit(pipeline.sha)\n  end\nend\n\n# The better solution would be\n\nshas = project.pipelines.last(20).map(&:sha)\nrepository.list_commits_by_oid(shas)\n```\n\nWhatever happened in that block would not be counted. For each of these blocks\nissues were created and added to [an epic][epic-nplus1]; however, little\nprogress was made by the teams who had bypassed these checks in this way. This\nwas primarily because these performance issues were not a big\nproblem for GitLab.com, despite the fact they had become a problem for our customers.\n\nThe detected N + 1 issues included a lot of Git object read operations, for\nexample the `FindCommit` RPC. This is especially bad because this requires a\nnew Git process to be invoked to fetch each commit. If a millisecond later\nanother request comes in for the same repository, Gitaly will invoke Git again\nand Git will do all this work again. Before the migration and when GitLab.com\nwas still using NFS, GitLab leveraged Rugged, and used memoization to keep around\nthe Rugged Repository until the Rails request was done. This allowed Rugged to\nload part of the Git repository into memory for faster access for subsequent\nrequests. 
This property was not recreated in Gitaly for some time.\n\n## Enter cat-file cache\n\nIn GitLab 12.1, Gitaly will cache a repository per Rails session to recreate this\nbehavior with a feature called ['cat-file' cache](https://gitlab.com/gitlab-org/gitaly/merge_requests/1203).\nTo explain how this cache works and its name, it should be noted that objects\nin Git are compressed using [zlib][zlib]. This means that when a commit object\nisn't packed and is located on disk, it seemingly contains garbage:\n\n```\n# This example is an empty .gitkeep file\n$ cat .git/objects/e6/9de29bb2d1d6434b8b29ae775ad8c2e48c5391\nxKOR0`\n```\n\n`git cat-file` will query for the object and, when passed the `-p` flag, pretty-print\nit. The following example prints the current [Gitaly license][gitaly-mit]:\n\n```\n$ git cat-file -p c7344c56da804e88a0bca979a53e1ec1c8b6021e\nThe MIT License (MIT)\n... omitted\n```\n\nCat-file has another flag, `--batch`, which allows for multiple objects to be\nrequested from the same process through STDIN.\n\n```\n$ git cat-file --batch\nc7344c56da804e88a0bca979a53e1ec1c8b6021e\nc7344c56da804e88a0bca979a53e1ec1c8b6021e blob 1083\nThe MIT License (MIT)\n\n... omitted\n```\n\nTracing the Git process using [strace][strace] allows us to inspect how Git\namortizes expensive operations to improve performance. The output on STDOUT and\nthe strace are available [as a snippet](https://gitlab.com/snippets/1858975).\n\nThe process is reading the first input from STDIN, or file descriptor 0, at\n[line 141](https://gitlab.com/snippets/1858975#L141). It starts writing the output\nabout [40 syscalls later](https://gitlab.com/snippets/1858975#L180). In between\nthere are two important operations performed: an\n[mmap of the pack file index](https://gitlab.com/snippets/1858975#L167), and\nanother [mmap of the pack file itself](https://gitlab.com/snippets/1858975#L177).\nThese operations store part of these files in memory, so that they are available\nthe next time they are needed.\n\nIn the snippet, we've requested the same blob on the same process again. This is a\nsynthetic follow-up request, but even if the next request had been for `HEAD`,\nGit would have to do considerably less work to come up with the object\nthat `HEAD` dereferences to.\n\nKeeping a cat-file process around for subsequent requests was shipped in\nGitLab 11.11 behind the `gitaly_catfile-cache` feature flag, and will be\n[enabled by default][remove-ff] in GitLab 12.1.\n\n### Next steps\n\nThe `cat-file` cache is one of many improvements being made to improve Git IO\npatterns in GitLab, to mitigate slow IO when using NFS and improve performance\nof GitLab. In particular, progress has been made in GitLab 11.11, and continues\nto be made in eliminating the worst N + 1 access patterns from GitLab. 
You can\nfollow [gitlab-org&1190][epic-worst-io] for\nthe full plan and progress.\n\nThe Gitaly team's highest priority is\n[automatically enabling Rugged][automatic-rugged]\nfor GitLab servers using NFS to immediately mitigate the performance\nregressions until performance improvements are sufficiently complete in GitLab\nand Gitaly, allowing Rugged to again be removed.\n\nIn the future, we will remove the need for NFS with\n[High Availability for Gitaly][ha-epic], providing both performance and\navailability, and eliminating the burden of maintaining an NFS cluster.\n\nCover image by [Jannes Glas](https://unsplash.com/@jannesglas) on [Unsplash](https://unsplash.com/photos/P6iOpqQpwwU)\n{: .note}\n\n[automatic-rugged]: https://gitlab.com/gitlab-org/gitlab-ce/issues/60931\n[epic-nplus1]: https://gitlab.com/groups/gitlab-org/-/epics/827\n[epic-worst-io]: https://gitlab.com/groups/gitlab-org/-/epics/1190\n[feature-flag-docs]: https://docs.gitlab.com/ee/administration/nfs.html#improving-nfs-performance-with-gitlab\n[gitaly-mit]: https://gitlab.com/gitlab-org/gitaly/blob/1b09f13374be5b272d40b3b089372adae2801f81/LICENSE\n[gitaly-nfs-blog]: /2018/09/12/the-road-to-gitaly-1-0/\n[ha-epic]: https://gitlab.com/groups/gitlab-org/-/epics/842\n[remove-ff]: https://gitlab.com/gitlab-org/gitaly/issues/1671\n[strace]: https://strace.io/\n[zlib]: https://www.zlib.net/\n",[702,704],{"slug":6222,"featured":6,"template":678},"git-performance-on-nfs","content:en-us:blog:git-performance-on-nfs.yml","Git Performance On Nfs","en-us/blog/git-performance-on-nfs.yml","en-us/blog/git-performance-on-nfs",{"_path":6228,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6229,"content":6235,"config":6240,"_id":6242,"_type":16,"title":6243,"_source":17,"_file":6244,"_stem":6245,"_extension":20},"/en-us/blog/group-conversation-podcast",{"title":6230,"description":6231,"ogTitle":6230,"ogDescription":6231,"noIndex":6,"ogImage":6232,"ogUrl":6233,"ogSiteName":692,"ogType":693,"canonicalUrls":6233,"schema":6234},"How we turn our group conversations into a podcast with GitLab CI/CD","Want to listen to meetings on the go? Senior SRE John Jarvis explains how he turned his favorite remote meetings at GitLab into podcast format.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678626/Blog/Hero%20Images/group-conversation-podcast.jpg","https://about.gitlab.com/blog/group-conversation-podcast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we turn our group conversations into a podcast with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jarvis\"}],\n        \"datePublished\": \"2019-07-03\",\n      }",{"title":6230,"description":6231,"authors":6236,"heroImage":6232,"date":6237,"body":6238,"category":14,"tags":6239},[4885],"2019-07-03","\n[Group conversations](/handbook/group-conversations/) are my favorite remote meetings at\nGitLab because they are a great way to get an inside peek at what different teams are doing,\nhow they collaborate, and what features you might find in future GitLab releases.\nYou may already know that we have been livestreaming these on\n[GitLab Unfiltered](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A) for anyone curious about how GitLab operates.\n\nLately, when I have time to listen to these unfiltered discussions I am either not at a screen or not in a place\nwhere it is easy to watch a video. 
After seeing how [Support turned their weekly meeting into a podcast](/blog/how-we-turned-40-person-meeting-into-a-podcast/),\nI thought it would be nice to make the GitLab group conversation meetings into a podcast as well!\n\n[Subscribe to the GitLab Group Conversations podcast](https://gitlab-com.gitlab.io/gl-infra/podcasts/#podcasts)\n{: .alert .alert-gitlab-purple .text-center}\n\nNow in addition to the livestreams and videos, there is a podcast feed for GitLab group conversations.\nListen to these conversations on your favorite podcast player by accessing the feed on\n[the Group Conversations podcast page](https://gitlab-com.gitlab.io/gl-infra/podcasts/#podcasts).\n\nIf you like the format, please let us know by tweeting us [@GitLab](https://twitter.com/gitlab)\nand we will consider adding more!\n\n### Here is a bit more detail about how these podcasts are generated\n\n* Teams that livestream group conversations\n  [follow instructions for broadcasting it live](/handbook/group-conversations/#livestream-the-video)\n  and creating the video. When the meeting is over, the video is made available on GitLab Unfiltered.\n\n* A daily GitLab CI job in the [podcasts project](https://gitlab.com/gitlab-com/gl-infra/podcasts)\n  downloads the group conversation videos and converts them to audio files. It's easy to create [pipeline schedules in GitLab](https://docs.gitlab.com/ee/ci/pipelines/schedules.html).\n\n  ![The podcast schedule](https://about.gitlab.com/images/blogimages/podcast-schedule.png){: .shadow.medium.center}\n\n* An RSS feed is generated and audio files are uploaded to object storage from the CI job.\n\n* GitLab Pages is used to host a static site that links to the feed.\n\n* This is all automated in a CI pipeline that runs every hour!\n\n![Podcast pipelines](https://about.gitlab.com/images/blogimages/podcast-pipeline.png){: .shadow.medium.center}\n\nI hope you have the opportunity to tune into the group conversations at GitLab and\nalso take advantage of GitLab CI features like schedules to help automate your own\nworkflows!\n\nPhoto by [Lee Campbell](https://unsplash.com/@leecampbell?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/headphones?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[915,2409,110],{"slug":6241,"featured":6,"template":678},"group-conversation-podcast","content:en-us:blog:group-conversation-podcast.yml","Group Conversation Podcast","en-us/blog/group-conversation-podcast.yml","en-us/blog/group-conversation-podcast",{"_path":6247,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6248,"content":6254,"config":6262,"_id":6264,"_type":16,"title":6265,"_source":17,"_file":6266,"_stem":6267,"_extension":20},"/en-us/blog/using-ansible-and-gitlab-as-infrastructure-for-code",{"ogTitle":6249,"schema":6250,"ogImage":6251,"ogDescription":6252,"ogSiteName":692,"noIndex":6,"ogType":693,"ogUrl":6253,"title":6249,"canonicalUrls":6253,"description":6252},"Build enterprise-grade IaC pipelines with GitLab DevSecOps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab and Ansible to create infrastructure as code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brad Downey\"},{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-01\",\n      
}","https://res.cloudinary.com/about-gitlab-com/image/upload/v1746211002/zlet4rmfg2z0j6lg16mc.png","Learn how to transform infrastructure automation into scalable, secure pipelines using GitLab, Terraform/OpenTofu, and Ansible with integrated security scanning and CI/CD.","https://about.gitlab.com/blog/using-ansible-and-gitlab-as-infrastructure-for-code",{"heroImage":6251,"body":6255,"authors":6256,"updatedDate":6259,"date":6260,"title":6249,"tags":6261,"description":6252,"category":14},"Infrastructure-as-code tools like TerraForm/OpenTofu and configuration management tools like Ansible are often part of mission-critical workflows. Such projects sometimes start as simple automations and are not necessarily subject to the same software development best practices and regulatory controls as business software applications.\n\nAt the same time many of these automations are developed by system engineers or infrastructure engineers who may not have as much experience with DevOps, DevSecOps, CI/CD, and test automation practices. This becomes even more complicated when you work in a large enterprise organization with multiple engineers and siloed teams.\n\nAt GitLab we know DevSecOps and we have been using our unified DevSecOps platform for enterprise-scale, mission-critical automation workloads for more than 10 years. We have thousands of customers who use GitLab as a foundation for infrastructure as code (IaC), automation, cloud, and platform engineering practices.\n\nIn this article, we showcase some of the key features teams can leverage to turn their powerful automations into scalable and auditable software delivery pipelines.\n\n![Automation listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/oipm6tq8qutoh1ctredd.png)\n\n## Implementation\n\n[This project](https://gitlab.com/gl-demo-ultimate-saberkan/public/ansible-demo) demonstrates a comprehensive DevOps workflow that combines the power of OpenTofu with modern Ansible practices, all orchestrated through GitLab CI/CD pipelines. The solution showcases how to provision an AWS lab environment using OpenTofu components integrated with GitLab, and then deploy a Tomcat web server using modern Ansible, including custom execution environments and collections.\n\nThe project leverages numerous GitLab features:\n\n* Building and storing custom Ansible execution environments in the [GitLab Container Registry](https://docs.gitlab.com/user/packages/container_registry/)\n* [Security scanning for infrastructure as code and container vulnerabilities](https://docs.gitlab.com/user/application_security/iac_scanning/)\n* Integrating [Ansible linting with GitLab's Code Quality](https://docs.gitlab.com/user/application_security/iac_scanning/)\n* Storing Tomcat binaries in the [Generic Package Repository](https://docs.gitlab.com/user/packages/generic_packages/)\n* Utilizing [CI/CD environment variables for configuration](https://docs.gitlab.com/ci/variables/)\n\nThe entire workflow is automated through a [GitLab pipeline](https://docs.gitlab.com/ci/pipelines/) that handles everything from infrastructure provisioning to application deployment and security testing.\n\n![ Workflow automated through a GitLab pipeline ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/giatmolwn9inusi4cev2.png)\n\n### Provisioning the environment with OpenTofu\n\nThe project begins with provisioning an AWS lab environment using OpenTofu. 
This is achieved through native integration with [GitLab's OpenTofu components](https://docs.gitlab.com/user/infrastructure/iac/), which streamline the infrastructure provisioning process. The pipeline includes validate, plan, and apply stages that ensure proper infrastructure deployment while maintaining GitLab's IaC best practices.\n\nThis project leverages [GitLab's Terraform State management](https://docs.gitlab.com/user/infrastructure/iac/terraform_state/) and [Terraform Module Registry](https://docs.gitlab.com/user/packages/terraform_module_registry/) capabilities. Both of these features are compatible with OpenTofu and HashiCorp Terraform. GitLab OpenTofu components can also be used with HashiCorp Terraform with [slight customization](https://gitlab.com/components/opentofu#can-i-use-this-component-with-terraform): you'll need to build your own image that includes a script named `gitlab-tofu` to keep it compatible with the component jobs, and you can then swap the `tofu` commands for `terraform` commands.\n\nThe OpenTofu module release component is a sample demonstrating how to build a Terraform module and store it in GitLab's Terraform module registry. The `provision_lab.tf` file imports this module directly from GitLab to deploy the lab environment in AWS. Upon completion, it outputs an inventory file containing the public IP address of the provisioned instance, which can be used in configuration management stages with Ansible.\n\n```\n# From .gitlab-ci.yml\n - component: gitlab.com/components/opentofu/module-release@1.1.0\n   inputs:\n     as: 🔍 tofu-module-release\n     stage: 🏗️ build-tofu-module\n     module_version: 0.0.1\n     module_system: aws\n     module_name: aws-lab\n     root_dir: tofu/modules/ansible-demo/aws-lab\n     rules:\n       - if: \"$CI_COMMIT_BRANCH\"\n         when: manual\n```\n\n```\n# From provision_lab.tf\nmodule \"aws-lab\" {\n  source = \"https://gitlab.com/api/v4/projects/67604719/packages/terraform/modules/aws-lab/aws/0.0.1\"\n}\n```\n\nThe validate, plan, and deploy components are configured with `auto_define_backend: true`, which automatically integrates with GitLab's built-in Terraform state backend. 
This approach eliminates the need for manual backend configuration or external state storage solutions like S3 buckets.\n\n```\n# From gitlab-ci.yml\n- component: gitlab.com/components/opentofu/apply@0.55.0\n  inputs:\n    version: 0.55.0\n    opentofu_version: 1.8.8\n    root_dir: tofu\n    state_name: demo\n    as: ✅ tofu-apply\n    stage: 🏗️ provision-lab\n    auto_define_backend: true\n    rules:\n      - if: \"$CI_COMMIT_BRANCH\"\n        when: manual\n```\n\n![Validate, plan, and deploy components are configured with `auto_define_backend: true`](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/giatmolwn9inusi4cev2.png)\n\nThe infrastructure configuration creates a CentOS Stream 9 EC2 instance with appropriate security groups for SSH access from GitLab runners and HTTP access to the Tomcat server.\n\nSSH access and HTTP access are configured through [GitLab CI/CD environment variables](https://docs.gitlab.com/ci/variables/#define-a-cicd-variable-in-the-ui).\n\n![SSH access and HTTP configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433381/cmqtzg6ahz8ua5w8ybgs.png)\n\nFor secure cloud access, the project implements [GitLab's OpenID Connect integration](https://docs.gitlab.com/ci/cloud_services/aws/) with AWS, using temporary credentials through AWS Security Token Service (STS):\n\n```\n# From .gitlab-ci.yml\n.tofu_aws_setup:\n id_tokens:\n   OIDC_TOKEN:\n     aud: https://gitlab.com\n before_script:\n   - echo \"${OIDC_TOKEN}\" > /tmp/web_identity_token\n   - export AWS_PROFILE=\"\"\n   - export AWS_ROLE_ARN=\"${AWS_ROLE_ARN}\"\n   - export AWS_WEB_IDENTITY_TOKEN_FILE=\"/tmp/web_identity_token\"\n```\n\n### Building the Ansible execution environment\n\nA key aspect of modern Ansible deployments is the use of [execution environments](https://docs.ansible.com/ansible/latest/getting_started_ee/index.html): containerized versions of Ansible with all necessary dependencies, including roles and collections, pre-installed. This project creates a custom execution environment based on Fedora 39, which includes ansible-core, ansible-runner, and additional collections such as `ansible.posix`, required in this example for firewall and SELinux configuration.\n\nThe third-party roles and collections in this project are natively downloaded from the community Ansible Galaxy repository. This approach leverages the community ecosystem of reusable Ansible content, as shown in the execution environment configuration. While this demo utilizes community Ansible resources, the exact same pipeline implementation is fully compatible with Red Hat Ansible Automation Platform. The pipeline structure remains identical, with only the content sources changing. Organizations using the enterprise version can simply redirect their automation content sources to their private Automation Hub instead of the default community Ansible Galaxy. 
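\n\nIn practice this comes down to pointing `ansible.cfg` at the private hub. The following is a rough sketch only: the hub URL and token are hypothetical placeholders, using the standard `ansible-galaxy` server-list configuration:\n\n```\n# Hypothetical ansible.cfg sketch: prefer a private Automation Hub,\n# falling back to the community Galaxy for content not mirrored there.\n[galaxy]\nserver_list = automation_hub, galaxy\n\n[galaxy_server.automation_hub]\nurl=https://hub.example.com/api/galaxy/\ntoken=<your-automation-hub-token>\n\n[galaxy_server.galaxy]\nurl=https://galaxy.ansible.com/\n```\n\n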
The official enterprise documentation covers the specifics: see [configuring your private Automation Hub server and access token in the ansible.cfg](https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/1.2/html/getting_started_with_red_hat_ansible_automation_hub/proc-configure-automation-hub-server#proc-configure-automation-hub-server).\n\n```\n# From execution-environment.yml\n---\nversion: 3\n\nimages:\n  base_image:\n    name: quay.io/fedora/fedora:39\n\ndependencies:\n  ansible_core:\n    package_pip: ansible-core\n  ansible_runner:\n    package_pip: ansible-runner\n  system:\n    - openssh-clients\n    - sshpass\n  galaxy:\n    collections:\n    - name: ansible.posix\n      version: \">=2.0.0\"\n```\n\n![Execution environment pushed to GitLab's Container Registry ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433384/dh1o2ojjmb04ru4tfr9k.png)\n\nThe execution environment is defined in a YAML file and built using ansible-builder, then pushed to [GitLab's Container Registry](https://docs.gitlab.com/user/packages/container_registry/). This approach ensures consistent execution environments across different systems and simplifies dependency management.\n\n```\n# From gitlab-ci.yml\n🔨 ansible-build-ee:\n  stage: 📦 ansible-build-ee\n  image: docker:24.0.5\n  needs: []\n  services:\n    - docker:24.0.5-dind\n  before_script:\n    - apk add --no-cache python3 py3-pip\n    - pip install ansible-builder\n    - cd ansible/execution-environment\n  script:\n    - ansible-builder build -t ${EE_IMAGE_NAME}:${EE_IMAGE_TAG} --container-runtime docker\n    - docker tag ${EE_IMAGE_NAME}:${EE_IMAGE_TAG} ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n```\n\n### Deploying Tomcat with Ansible\n\nOnce the infrastructure is provisioned and the execution environment is built, the pipeline deploys Tomcat using [Ansible Navigator](https://ansible.readthedocs.io/projects/navigator/). The execution environment built in the previous stage is used as the image for the deployment job in the GitLab pipeline.\n\n```\n# From gitlab-ci.yml\n🚀 ansible-deploy:\n  stage: 🚀 ansible-deploy\n  image: ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n  needs:\n    - ✅ tofu-apply\n  extends: [.ssh_private_key_setup, .default_rules]\n  script:\n    - ansible-navigator run ansible/playbook.yml\n      -i ansible/inventory/hosts.ini\n      --execution-environment false\n      --mode stdout\n      --log-level debug\n```\n\nThe Tomcat deployment fetches the application package from [GitLab's Generic Package Repository](https://docs.gitlab.com/user/packages/generic_packages/), configures system users and permissions, and sets up Tomcat as a systemd service.\n\n```\n# From playbook.yml\n---\n- name: Deploy Tomcat Server\n  hosts: all\n  become: true\n  roles:\n      - role: tomcat\n\n  vars:\n    # Tomcat package and installation\n    tomcat_package: \"https://gitlab.com/api/v4/projects/67604719/packages/generic/apache-tomcat/10.1.39/apache-tomcat-10.1.39.tar.gz\"\n    tomcat_install_dir: \"/opt/tomcat\"\n    java_package: \"java-17-openjdk-devel\"\n```\n\n![GitLab Package Registry](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433381/mynak8i2k7ms9vhdijqg.png)\n\n### Security scanning and code quality\n\nSecurity is integrated throughout the pipeline with multiple scanning tools. 
The project uses [GitLab's built-in SAST IaC scanner](https://docs.gitlab.com/user/application_security/iac_scanning/) to detect vulnerabilities in both Terraform and Ansible code. [Container scanning](https://docs.gitlab.com/user/application_security/container_scanning/) is applied to the execution environment image to identify any security issues and generate a [software bill of materials (SBOM)](https://docs.gitlab.com/user/application_security/container_scanning/#cyclonedx-software-bill-of-materials).\n\n```\n# From gitlab-ci.yml\ninclude:\n- template: Jobs/SAST-IaC.gitlab-ci.yml\n- template: Jobs/Container-Scanning.gitlab-ci.yml\n```\n\n![Security is integrated throughout the pipeline with multiple scanning tools](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433386/e6ejckcv5kdyhhosej2f.png)\n\n![Dependency listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/gsfpaldra4rmtkseaudo.png)\n\nAdditionally, the project integrates Ansible Lint with [GitLab's Code Quality](https://docs.gitlab.com/ci/testing/code_quality/#import-code-quality-results-from-a-cicd-job). This integration produces reports that are displayed directly in the GitLab interface, making it easy to identify and address issues.\n\n```\n# From gitlab-ci.yml\n🔍 ansible-lint:\n  stage: 🚀 ansible-deploy\n  image: ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n  needs: []\n  script:\n    - ansible-lint ansible/playbook.yml -f codeclimate | python3 -m json.tool | tee gl-code-quality-report.json || true\n  artifacts:\n    reports:\n      codequality:\n        - gl-code-quality-report.json\n```\n\n![The project integrates Ansible Lint with GitLab code quality](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/gsfpaldra4rmtkseaudo.png)\n\n### Health-checking the deployment\n\nAfter deployment, the pipeline performs health checks to ensure that the Tomcat server is running correctly. The health-check job attempts to connect to the server's HTTP port and verifies that it returns a successful response. This ensures that the deployment has completed successfully and the application is accessible.\n\nYou can test access from your browser using the public IP address of the provisioned EC2 instance running Tomcat.\n\n![Checking the health of a job](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433385/uksdkjryydxhu94v1naj.png)\n\n## Destroying the lab environment\n\nThe final stage of the pipeline is the cleanup process, which destroys the lab environment. This is implemented using the OpenTofu destroy component, which ensures that all resources created during the provisioning stage are properly removed.\n\n## Summary\n\nGitLab provides a unified DevSecOps platform and a framework to manage enterprise-scale, mission-critical infrastructure as code and configuration management automation practices. The framework includes version control, project planning and issue management, team collaboration, CI/CD pipelines, binary package and container registry, security scanning, and many other helpful features, along with the ability to embed governance and controls in the processes. 
If you are looking to expand your private or public cloud practices, or any governed, self-service automation workflow in general, consider GitLab, Terraform, and Ansible as the three-legged stool and the foundation for a scalable and governed automation platform.\n\n> Get started with a [free, 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/). Sign up today!",[6257,6258],"George Kichukov","Salahddine Aberkan","2025-04-24","2019-07-01",[2932,110],{"slug":6263,"featured":6,"template":678},"using-ansible-and-gitlab-as-infrastructure-for-code","content:en-us:blog:using-ansible-and-gitlab-as-infrastructure-for-code.yml","Using Ansible And Gitlab As Infrastructure For Code","en-us/blog/using-ansible-and-gitlab-as-infrastructure-for-code.yml","en-us/blog/using-ansible-and-gitlab-as-infrastructure-for-code",{"_path":6269,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6270,"content":6276,"config":6281,"_id":6283,"_type":16,"title":6284,"_source":17,"_file":6285,"_stem":6286,"_extension":20},"/en-us/blog/look-back-on-11-11-cicd",{"title":6271,"description":6272,"ogTitle":6271,"ogDescription":6272,"noIndex":6,"ogImage":6273,"ogUrl":6274,"ogSiteName":692,"ogType":693,"canonicalUrls":6274,"schema":6275},"Looking back on the 11.x releases for GitLab CI/CD","With GitLab 12.0 coming soon, it's a great time to reflect on all the features we've launched since 11.0.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666857/Blog/Hero%20Images/photo-cicdlookback.jpg","https://about.gitlab.com/blog/look-back-on-11-11-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Looking back on the 11.x releases for GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-06-19\",\n      }",{"title":6271,"description":6272,"authors":6277,"heroImage":6273,"date":6278,"body":6279,"category":14,"tags":6280},[4945],"2019-06-19","\nGitLab releases each month, so if you aren't paying close attention it can be easy to\nlose track of all the great features that are coming out. With an eye towards [CI/CD](/solutions/continuous-integration/)\nin particular, I'd like to take you through some of the highlights in each of our 11.x releases,\neach of which contributed to our strategy around cloud native CI/CD that has\nsecurity and smarts built right in, supports code reusability and live troubleshooting,\nand in general enables your team to make progress towards your goal of better, more\nreliable software delivery.\n\n![Release Badges](https://about.gitlab.com/images/blogimages/11x_release_logos.png){: .shadow.medium.center}\n\nFor those who don't know me, I'm the director of product for CI/CD and I've spent\nmy career (going all the way back to doing build automation of Windows 98 at my\nfirst corporate job) doing build and release automation and process. I love\nthis stuff, and my career move from building CI/CD implementations to building\nCI/CD tools for folks just like me has been one of the most rewarding things I've\ndone in my life. I hope that experience and passion comes through in the features\nwe've delivered – either way, I'd love to chat with you if you're a user of GitLab\nCI/CD. 
DM me on [Twitter](https://twitter.com/j4yav) or contact me via my [GitLab profile](https://gitlab.com/jyavorska) if you'd like to chat.\n\nAnyway, without further ado let's dive into the first 11.x release!\n\n## [GitLab 11.0](/releases/2018/06/22/gitlab-11-0-released/)\n\n### Auto DevOps Generally Available\n\nWe kicked off the 11.0 series in June 2018 by launching [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/).\nBeyond making it easy to host and collaborate on public and private repositories,\nGitLab also simplifies the rest of the process by offering the whole delivery toolchain,\nbuilt in and automated: Simply commit your code and Auto DevOps can do the rest.\nAuto DevOps is a pre-built, fully featured CI/CD pipeline that takes the best of\nGitLab CI/CD features, adds a lot of smarts around auto-detecting what's in your\nproject, and automates the entire delivery process to your Kubernetes cluster.\n\nCheck out our [quick-start guide](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html)\nif you haven't had a chance to play with it yet – you might be surprised what it's\ncapable of out of the box.\n\n![Auto DevOps](https://about.gitlab.com/images/11_0/auto-devops.png){: .shadow.medium.center}\n\n### Job logs in the Web IDE\n\nTying operational deployments/execution together with development is also a priority\nfor GitLab. In 11.0 we made the CI status of the current commit available in the status\nbar of the Web IDE, and made it possible to view the [status and the logs for each job on the right](https://docs.gitlab.com/ee/user/project/web_ide/#view-ci-job-logs).\nThis made it easy to fix a merge request with CI failures by opening the failed job\nright alongside your code.\n\n![Web IDE trace](https://about.gitlab.com/images/11_0/web_ide_ci_trace.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [CI/CD pipeline jobs integrated with the Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/#view-ci-job-logs)\n- [Variable-defined deployment policies for Canary deployments](https://docs.gitlab.com/ee/topics/autodevops/#deploy-policy-for-canary-environments)\n- [Specify deployment strategy from Auto DevOps settings](https://docs.gitlab.com/ee/topics/autodevops/#auto-deploy)\n\n---\n\n## [GitLab 11.1](/releases/2018/07/22/gitlab-11-1-released/)\n\n### Security reports in pipeline view\n\nSecurity was another important area of focus for us throughout the 11.x series. We\nalready had security reports in the MR before this release, but here we also\nadded status for branches so this information can be acted upon even earlier.\nGitLab 11.1 (July 2018) completed the [set of security reports shown in the pipeline view](https://docs.gitlab.com/ee/user/project/merge_requests/#security-reports),\nadding both Container Scanning and DAST. 
From there you could now simply review\nthe Reports tab to access all security information and take action.\n\n![Security Reports](https://about.gitlab.com/images/11_1/security_reports.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Redesign of the merge request and pipeline info sections](https://docs.gitlab.com/ee/user/project/merge_requests/)\n- [Improved Kubernetes cluster page design](https://docs.gitlab.com/ee/user/project/clusters/)\n\n---\n\n## [GitLab 11.2](/releases/2018/08/22/gitlab-11-2-released/)\n\n### Custom templates at the instance level\n\nIn 11.2 (August 2018) we also introduced [custom templates at the instance level](https://docs.gitlab.com/ee/administration/custom_project_templates.html),\nmaking it easy for organizations to set up a basic template for how they want\ntheir CI/CD pipelines to run. Development teams can grab a copy of the template\nand go, confident they're following their organizational processes. Our enterprise\ncustomers are very important to us, and this feature came directly from the great\nfeedback we get from our customers.\n\n![Project Templates](https://about.gitlab.com/images/11_2/project-templates-instance.png){: .shadow.medium.center}\n\n### Kaniko for Docker Builds\n\nHistorically, building Docker images within a containerized environment had\nrequired compromises, using techniques like docker-in-docker on privileged\ncontainers. These solutions were often insecure and slow. In this release we\nmade the Runner compatible with [Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html),\na new tool developed by Google which is able to securely build an image within\nan unprivileged container. Cloud-first build technology is so important for the\njourney we want to take with our users, and these kinds of foundational\ntechnologies that make your life easier are so nice to deliver.\n\n![Kaniko](https://about.gitlab.com/images/11_2/kaniko.png){: .shadow.medium.center}\n\n### JUnit test results in merge requests\n\nFinally, testing will always be an important part of any CI/CD pipeline. With the 11.2 release,\nwe made it possible to [see JUnit test results](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html)\nright from the CI view in the merge request widget, as part of our ongoing efforts\nto invest in full-spectrum integrated testing within GitLab.\n\n![JUnit Results](https://about.gitlab.com/images/feature_page/screenshots/junit-test-summaries-MR-widget.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [GitLab Runner in cloud native Helm Chart](https://docs.gitlab.com/charts/)\n- [Built-in project templates switched to use Dockerfiles](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project)\n- [Manually stop an environment](https://docs.gitlab.com/ee/ci/environments/index.html#stopping-an-environment)\n\n---\n\n## [GitLab 11.3](/releases/2018/09/22/gitlab-11-3-released/)\n\n### Built-in Maven package repository\n\nFor any development organization, having an easy and secure way to manage\ndependencies is critical. Package management tools, such as Maven for Java\ndevelopers, provide a standardized way to share and version control these\nlibraries across projects. 
In GitLab 11.3 (September 2018), we opened up [Maven repositories built directly into GitLab](https://docs.gitlab.com/ee/user/packages/maven_repository/index.html).\nJava developers were now easily able to publish their packaged libraries to\ntheir project’s Maven repository: Just share a simple XML snippet with\nother teams looking to utilize that library, and Maven and GitLab will take care\nof the rest.\n\n![Maven Repo](https://about.gitlab.com/images/11_3/maven.png){: .shadow.medium.center}\n\n### Interactive Web Terminals\n\nCI/CD jobs are executed in the runner as part of pipelines, but this execution wasn't interactive.\nWhen jobs failed, it wasn't always easy to dig into the details to spot the source of the problem.\n[Interactive web terminals](https://docs.gitlab.com/ee/ci/interactive_web_terminal/)\nbrought the capability to connect to a running or completed job and manually enter\ncommands to understand what’s happening in the system, and helped us move the story\nforward on empowering teams to deliver code, troubleshoot, and solve issues directly.\n\n![Web Terminal](https://about.gitlab.com/images/11_3/verify-webterm.png){: .shadow.medium.center}\n\n### Better includes with `extends` keyword\n\nReusing CI/CD code is a great way to help ensure consistency in software delivery,\nand it also minimizes the amount of per-job scripting that needs to be written and\nmaintained. As of 11.3, we began offering a powerful alternative approach\nfor code reuse in templates using the [YAML `extends` keyword](https://docs.gitlab.com/ee/ci/yaml/#extends),\nexpanding upon our vision for reusability and compliance in the enterprise.\n\n![Extends](https://about.gitlab.com/images/11_3/verify-includes.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)\n- [Auto DevOps enabled by default](https://docs.gitlab.com/ee/topics/autodevops/)\n- [Custom file templates for self-managed instances](https://docs.gitlab.com/ee/administration/settings/instance_template_repository.html)\n\n---\n\n## [GitLab 11.4](/releases/2018/10/22/gitlab-11-4-released/)\n\n### Feature Flags\n\nFeature Flags are a no-brainer for making software delivery easier, so you knew we'd eventually\nwant to include them in the GitLab single application. With the 11.4 release (October 2018) we delivered on\nthis promise by adding [Feature Flags](https://docs.gitlab.com/ee/operations/feature_flags.html),\nhelping teams to achieve continuous delivery by offering better options for incrementally\nrolling out changes and separating feature delivery from customer launch.\n\n![Feature Flags](https://about.gitlab.com/images/11_4/feature_flags.png){: .shadow.medium.center}\n\n### `only/except` rules for changes to files\n\nIn 11.4 we added a much-requested feature: the ability within the\n`.gitlab-ci.yml` to [use `only`/`except` rules for jobs](https://docs.gitlab.com/ee/ci/yaml/#only--except)\nbased on when modifications occur to a specific file or path (or glob). 
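\n\nA minimal sketch of such a rule (the job name and paths here are hypothetical):\n\n```yaml\n# Run this job only when something under docs/ changes\nbuild-docs:\n  script: make docs\n  only:\n    changes:\n      - docs/**/*\n```\n\n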
Rules like this allowed\nfor even more smarts in the pipeline, especially for monorepo/microservice-type\nuse cases, where the pipeline behavior can be optimized based on the changed files\nin the repository.\n\n![Only Except Changes](https://about.gitlab.com/images/11_4/verify-onlyexceptchanges.png){: .shadow.medium.center}\n\n### Timed incremental rollouts\n\nTeams already had the ability within Auto DevOps to set up incremental rollouts,\nbut with this release we added an option to also set up [timed incremental rollouts](https://docs.gitlab.com/ee/topics/autodevops/#timed-incremental-rollout-to-production)\nwhere the rollout will automatically continue forward on a timed cadence, making\nsure there is no error before continuing. This helped us push our vision for safe,\ncontinuous deployment forward by providing teams with a new tool for controlling\ntheir code rollouts.\n\n![Timed Incremental Rollouts](https://about.gitlab.com/images/11_4/timed_incremental_rollouts.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Moving `includes` from Starter to Core](https://docs.gitlab.com/ee/ci/yaml/#include)\n- [Auto DevOps support for RBAC](https://docs.gitlab.com/ee/topics/autodevops/)\n- [Filter admin runners view by type/state](https://docs.gitlab.com/ee/ci/runners/)\n- [Support for interactive web terminals with Docker executor](https://docs.gitlab.com/ee/ci/interactive_web_terminal/)\n- [Delayed jobs for pipelines](https://docs.gitlab.com/ee/ci/yaml/#whendelayed)\n\n---\n\n## [GitLab 11.5](/releases/2018/11/22/gitlab-11-5-released/)\n\n### Access control for Pages\n\nWith the 11.5 release (November 2018) we delivered a fantastic community-contributed feature which enabled\naccess control for Pages. From then on, instead of only supporting use cases where the\ncontent associated with the product is public, you could use Pages to build and\npublish protected content that should [only be accessible by project members](https://docs.gitlab.com/ee/user/project/pages/introduction.html#gitlab-pages-access-control).\nOperational documentation, internal secrets, or even just private planning or\nother information can now be confidently published via your pipelines in an\neasy-to-access way, with confidence that only the right people are able to see it.\n\n![Access Control Pages](https://about.gitlab.com/images/11_5/access-control-pages.png){: .shadow.medium.center}\n\n### Deploy Knative to your Kubernetes cluster\n\nBuilding [serverless applications](/topics/serverless/) enables teams to focus their time on making a\ngreat product and eliminates the need to provision, manage, and operate\nservers. Starting in GitLab 11.5, we enabled [deploying Knative to your existing Kubernetes cluster](https://docs.gitlab.com/ee/update/removals.html)\nwith a single click using the GitLab Kubernetes integration. Knative is a\nKubernetes-based platform to build, deploy, and manage modern serverless workloads.\nTasks that were once difficult, such as source-to-container builds, routing and\nmanaging traffic, and scaling-to-zero, now work effortlessly out of the box.\n\n![KNative](https://about.gitlab.com/images/11_5/knative.png){: .shadow.medium.center}\n\n### Parallel attribute for faster pipelines\n\nDelivery speed in a CI/CD environment is often limited by the time it takes to complete the various tests needed to ensure the code can be shipped. 
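\n\nA minimal sketch of the keyword in use (hypothetical job; each of the five concurrent copies can shard its work using the `CI_NODE_INDEX` and `CI_NODE_TOTAL` variables):\n\n```yaml\n# Split the RSpec suite across five concurrent jobs\nrspec:\n  script: bundle exec rspec\n  parallel: 5\n```\n\n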
With the `parallel` keyword in GitLab CI/CD, teams can quickly and easily parallelize those tests – accelerating the testing process and overall time to delivery.\n\n![Parallel](https://about.gitlab.com/images/11_5/parallel-keyword.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Review Apps can now link directly to changed pages](https://docs.gitlab.com/ee/ci/environments/index.html#going-from-source-files-to-public-pages)\n- [New CI/CD syntax for security, quality, and performance report types](https://docs.gitlab.com/ee/ci/yaml/#artifactsreports)\n- [Additional information about deployments in merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/index.html#pipeline-status-in-merge-requests)\n\n---\n\n## [GitLab 11.6](/releases/2018/12/22/gitlab-11-6-released/)\n\n### GitLab Serverless\n\nBuilding on the Knative integration introduced the previous month, 11.6's new, more\ncomprehensive [Serverless](https://docs.gitlab.com/ee/update/removals.html)\ncapability enabled users to easily define functions in their repository and have\nthem served and managed by Knative. Cloud native is such an important part of our\nroadmap, and it was really exciting to launch this feature while I was at KubeCon,\nno less.\n\nBy simply defining your function data in the repo’s `serverless.yml` file and\nusing a `.gitlab-ci.yml` template, each function will be deployed to your cluster,\nwith Knative taking care of scaling your function based on request volume. This\nenables application developers to iterate quickly without having to worry about\nprovisioning or managing infrastructure.\n\n![Serverless](https://about.gitlab.com/images/11_6/serverless.png){: .shadow.medium.center}\n\n### Run pipeline jobs for merge requests\n\nRunning a given job only when dealing with a merge request was made much easier in 11.6. Using the\n`merge_requests` value with `only/except` keywords allows you to configure jobs\nto run [only or except when in the context of a merge request](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\nThis allows finer control over pipeline behavior, and also provides access to new\nenvironment variables indicating the target branch and merge request ID to be used\nfor additional automated behaviors.\n\n![Merge Request Pipelines](https://about.gitlab.com/images/11_6/verify-mergerequestpipelines.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Kubernetes clusters for groups](https://docs.gitlab.com/ee/user/group/clusters/)\n- [Pipelines are now deletable via API](https://docs.gitlab.com/ee/api/pipelines.html#delete-a-pipeline)\n- [Trigger variables are now hidden in UI by default](https://docs.gitlab.com/ee/ci/triggers/)\n\n---\n\n## [GitLab 11.7](/releases/2019/01/22/gitlab-11-7-released/)\n\n### Releases page\n\nThe 11.7 release (January 2019) added the ability to [create releases in GitLab](https://docs.gitlab.com/ee/user/project/releases/index.html)\nand view them on a summary page. Releases are a snapshot in time of the source,\nlinks, and other metadata or artifacts associated with a released version of your\ncode, and help users of your project easily discover the latest releases\nof your software.\n\nAs a career release manager, this is a feature near and dear to my heart.\nI have so many plans around [Release Orchestration](/direction/release/release_orchestration/)\nthat build on this feature as a foundation. 
Being able to tie a milestone to\na release, a feature coming very soon, will open the door to tying together all\nkinds of interesting things happening in GitLab to a release. This isn't my forward-looking\nblog post so I won't go too far here, but I'll just say I can't wait to\ngo on that journey to build something really unique and powerful together with our users.\n\n![Releases Page](https://about.gitlab.com/images/11_7/release-releases_page.png){: .shadow.medium.center}\n\n### Expand upstream/downstream pipelines across projects\n\nWith 11.7 it became possible to [expand upstream or downstream cross-project pipelines](https://docs.gitlab.com/ee/ci/pipelines/index.html#visualize-pipelines)\nright from the pipeline view, giving you visibility into your end-to-end pipelines,\nno matter in which project they start or finish. It's one pattern we've been seeing\nmore and more of in GitLab, and we're adding more features to support it. The reality of\ncontinuous delivery is complex orchestration across projects and even groups, so\nthis is a feature that was nice to get out the door to help make this easier.\n\n![Cross-Project Pipelines](https://about.gitlab.com/images/11_7/release-pipeline_expansion.png){: .shadow.medium.center}\n\n### NPM package repository\n\nIn January we also started offering [NPM registries](https://docs.gitlab.com/ee/user/packages/npm_registry/index.html)\nbuilt directly into GitLab. From this point teams could share a simple package-naming\nconvention to utilize that library in any Node.js project, and NPM and GitLab will\ndo the rest – all from a single, easy-to-use interface. Yet another step on our path\nto enable all kinds of repositories, built right into GitLab when you need them.\n\n![NPM Packages](https://about.gitlab.com/images/11_7/npm_package_view.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Ability to configure Kubernetes app secrets as variables in Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/#application-secret-variables)\n- [API support for Kubernetes integration](https://docs.gitlab.com/ee/api/project_clusters.html)\n- [Short commit SHA available as environment variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\n- [Authorization support for fetching includes](https://docs.gitlab.com/ee/ci/yaml/#include)\n- [Skip CI builds during git push with `skip_ci` keyword](https://docs.gitlab.com/ee/ci/pipelines/#skip-a-pipeline)\n\n---\n\n## [GitLab 11.8](/releases/2019/02/22/gitlab-11-8-released/)\n\n### `trigger:` keyword for pipelines\n\nAs far back as GitLab 9.3 you could create multi-project pipelines by triggering\na downstream pipeline via a GitLab API call in your job. In GitLab 11.8 (February 2019), we added\nfirst-class support for triggering these downstream pipelines with the [`trigger:`](https://docs.gitlab.com/ee/ci/yaml/#trigger)\nkeyword, instead of requiring teams to make an API call to trigger the downstream\npipeline. A bit more support for those cross-project use cases, making everything just\na little bit nicer to use.\n\n![Trigger](https://about.gitlab.com/images/11_8/multi_project_pipeline_graph.png){: .shadow.medium.center}\n\n### Pages support for subgroups\n\nPages was updated in 11.8 to [work with subgroups in GitLab](https://docs.gitlab.com/ee/administration/pages/),\ngiving you the ability to create Pages sites at that level as well. 
Sites set up in this\nway will have a URL in the format of `toplevel-group.gitlab.io/subgroup/project`,\nmaking them very easy to find.\n\n![Pages for SubGroups](https://about.gitlab.com/images/11_8/release-pages-subgroups.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Several new templates for getting started quickly with GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/#getting-started)\n- [Auto DevOps support for environment-specific custom domain](https://docs.gitlab.com/ee/topics/autodevops/#environment-variables)\n- [Feature Flags were improved by making them environment-aware](https://docs.gitlab.com/ee/operations/feature_flags.html#define-environment-specs)\n- [CI_PAGES_DOMAIN and CI_PAGES_URL added as helpful variables accessible to Pages pipelines](https://docs.gitlab.com/ee/user/project/pages/)\n- [.html extensions are now automatically resolved for Pages sites](https://docs.gitlab.com/ee/user/project/pages/)\n- [Tolerations were added to the Kubernetes executor](https://docs.gitlab.com/runner/executors/kubernetes.html#the-keywords)\n- [A new cleanup procedure for the Container Registry](https://docs.gitlab.com/ee/api/container_registry.html#delete-a-repository-tag)\n- [Force redeploy when Auto DevOps secrets are updated](https://docs.gitlab.com/ee/topics/autodevops/#environment-variables)\n\n---\n\n## [GitLab 11.9](/releases/2019/03/22/gitlab-11-9-released/)\n\n### Feature Flag auditability\n\nWith the 11.9 release (March 2019), operations like adding, removing, or changing Feature Flags\nare now [recorded in the GitLab audit log](https://docs.gitlab.com/ee/administration/audit_events.html),\ngiving you visibility into what is changing and when. If you’re having an incident\nand need to see what changed recently, or just need to look back as an auditor on\nhow your feature flags have been modified, this is now very easy to do. We have\nbig plans for Feature Flags, and also for compliance built right into your pipelines.\nIt was great to knock out a two-for-one with this one.\n\n![Feature Flag audit events](https://about.gitlab.com/images/11_9/release-ffaudit.png){: .shadow.medium.center}\n\n### Security templates for pipelines\n\nGitLab security features evolve very fast, and they always need to be up to\ndate to be effective and protect your code. We know that changing the job\ndefinition is difficult if you have to manage multiple projects. As of this release we\nbundle security templates [directly into your configuration](https://docs.gitlab.com/ee/user/application_security/sast/#configuring-sast),\nand they are updated with your system every time you upgrade to a new version of\nGitLab, without any change to any pipeline configuration required. 
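\n\nPulling one in is a one-line `include` in `.gitlab-ci.yml`: a minimal sketch (the exact template names have varied across GitLab versions):\n\n```yaml\n# Include GitLab's bundled SAST template; its jobs update with each upgrade\ninclude:\n  - template: SAST.gitlab-ci.yml\n```\n\n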
Security plus\nreusability, a great combination.\n\n![Security Templates](https://about.gitlab.com/images/11_9/templates.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Project templates for .NET, Go, iOS, and Pages](https://docs.gitlab.com/ee/user/project/working_with_projects.html#built-in-templates)\n- [Run specific jobs on merge requests only when files change](https://docs.gitlab.com/ee/ci/jobs/job_control.html#use-onlychanges-with-merge-request-pipelines)\n- [Auto DevOps build jobs for tags](https://docs.gitlab.com/ee/topics/autodevops/#auto-build)\n\n---\n\n## [GitLab 11.10](/releases/2019/04/22/gitlab-11-10-released/)\n\n### Pipeline dashboard\n\nIn 11.10 (April 2019) we added [pipeline status information to the Operations Dashboard](https://docs.gitlab.com/ee/user/operations_dashboard/).\nThis helps teams view the pipeline health of all the projects that they care about,\nall together in a single interface. Yet another step towards making pipelines across\nyour instance easy to understand and follow, this one was built in real-time coordination\nwith a customer, which is always a nice way to get something done. You get to build\nsomething that solves a real problem and collaborate directly with the folks who\nneed it.\n\n![Pipeline Dashboard](https://about.gitlab.com/images/11_10/cross-project-pipelines-dashboard.gif){: .shadow.medium.center}\n\n### Pipelines on merge results\n\nWhen working in a feature branch, it’s normal to have it diverge over\ntime from the target branch if you aren’t rebasing frequently. This can result\nin a situation where both the source and target branch’s pipelines are green and\nthere are no merge conflicts, but the combined output will result in a failed\npipeline due to an incompatibility between the changes.\n\nWith 11.10 it became possible for a pipeline to automatically create a new ref that\ncontains the combined merge result of the source and target branch, then\n[run the pipeline against that ref](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html)\n(what we call an `attached` state). In this way, GitLab can help keep your\nmaster branch green even when many teams are merging into the release branch.\n\nTools and techniques built right into GitLab for keeping master green were a big\nfocus in the last few releases of 11.x, and will remain so for 12.x as well. Look\nfor [merge trains](https://gitlab.com/gitlab-org/gitlab-ee/issues/9186) to be built\non top of this foundation, and some really cool enhancements around sequencing and\nparallelization of them.\n\n![Merge Ref Pipeline](https://about.gitlab.com/images/11_10/merge_request_pipeline.png){: .shadow.medium.center}\n\n### Composable Auto DevOps\n\nAuto DevOps enables teams to adopt modern DevOps practices with little to no effort.\nStarting in GitLab 11.10, each job of Auto DevOps was made available as an\nindependent template. Using the includes feature of GitLab CI, users can [choose to bring in\nonly certain stages of Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/#using-components-of-auto-devops) while continuing to use their own custom\n`gitlab-ci.yml` for the rest. 
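\n\nFor example, a project could adopt just the Auto DevOps build and test jobs while keeping everything else custom. A sketch, using template names from the Auto DevOps docs of that era:\n\n```yaml\n# Bring in two Auto DevOps stages; the rest of .gitlab-ci.yml stays custom\ninclude:\n  - template: Jobs/Build.gitlab-ci.yml\n  - template: Jobs/Test.gitlab-ci.yml\n```\n\n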
This helps teams to use just the desired jobs, while\ntaking advantage of any updates made upstream.\n\n![Composable Auto DevOps](https://about.gitlab.com/images/11_10/composable-auto-devops.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [More thorough Container Registry cleanup](https://docs.gitlab.com/omnibus/maintenance/#removing-unused-layers-not-referenced-by-manifests)\n- [Ability to purchase CI add-on runner minutes](https://docs.gitlab.com/ee/administration/settings/continuous_integration.html#extra-shared-runners-pipeline-minutes-quota)\n- [Change the cloning path for pipelines](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#custom-build-directories)\n- [Simple masking of protected variables in logs](https://docs.gitlab.com/ee/ci/variables/#masked-variables)\n- [Enable/disable Auto DevOps at the group level](https://docs.gitlab.com/ee/topics/autodevops/#enablingdisabling-auto-devops-at-the-group-level)\n- [Group-level runners for group-level clusters](https://docs.gitlab.com/ee/user/group/clusters/#installing-applications)\n- [Control over `git clean` flags in pipeline jobs](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#git-clean-flags)\n\n---\n\n## [GitLab 11.11](/releases/2019/05/22/gitlab-11-11-released/)\n\n### Windows Container Executor\n\nIn GitLab 11.11 (May 2019) we were very pleased to add a new executor to the GitLab Runner\nfor using [Docker containers on Windows](https://docs.gitlab.com/runner/executors/docker.html#using-windows-containers).\nPreviously, using the shell executor to orchestrate Docker commands was the primary\napproach for Windows, but with this update you are now able to use Docker\ncontainers on Windows directly, in much the same way as if they were on Linux\nhosts. This opened up the door for more advanced kinds of pipeline orchestration\nand management for our users of Microsoft platforms.\n\nAlso included with this update was improved support for PowerShell throughout GitLab\nCI/CD, as well as new helper images for various versions of Windows containers.\n\n![Windows Executor](https://about.gitlab.com/images/11_11/windows-container.png){: .shadow.medium.center}\n\n### Caching proxy for Container Registry\n\nLots of teams are using containers as part of their build pipelines, and our new\n[caching proxy](https://docs.gitlab.com/ee/user/packages/dependency_proxy/index.html) for\nfrequently used upstream images/packages introduced a great way to speed them up.\nBy keeping a copy of needed layers locally using the new caching proxy, you can\neasily improve execution performance for the commonly used images in your environment.\n\n![Dependency Proxy](https://about.gitlab.com/images/11_11/dependency-proxy-mvc.png){: .shadow.medium.center}\n\n### Chat notifications for deployments\n\nIn 11.11 deployment events were available to be [automatically shared in your team’s channel](https://docs.gitlab.com/ee/user/project/integrations/)\nthrough our Slack and Mattermost chat integrations, helping bring visibility to\nthese important activities that your teams need to be aware of.\n\n![Notifications](https://about.gitlab.com/images/11_11/release-slack-notification.png){: .shadow.medium.center}\n\n### Guest Access for Releases\n\nIt also became possible in this release for [guest users of your projects to view releases](https://docs.gitlab.com/ee/user/permissions.html#releases-permissions)\nthat you have published on the Releases page. 
They will be able to download your\npublished artifacts, but are prevented from downloading the source code or seeing\nrepository information such as tags and commits.\n\n![Guest Releases](https://about.gitlab.com/images/11_7/release-releases_page.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Add-on runner minutes extended to free plans](https://docs.gitlab.com/ee/administration/settings/continuous_integration.html#extra-shared-runners-pipeline-minutes-quota)\n- [Access deployment details through environments API](https://docs.gitlab.com/ee/api/environments.html#get-a-specific-environment)\n- [Create a file directly from environment variable](https://docs.gitlab.com/ee/ci/variables/#variable-types)\n- [Run all manual jobs for a stage in one click](https://docs.gitlab.com/ee/ci/pipelines/index.html#add-manual-interaction-to-your-pipeline)\n\n---\n\n## In conclusion\n\nPhew... that was a lot of great features, and the team here at GitLab is really proud of\nwhat we delivered with this series of GitLab releases. I hope you found something\nthat you can take advantage of in your own CI/CD process. If you're interested in\nseeing where we're heading next, head over to our [CI/CD strategy page](/direction/ops/)\nand check out what's coming. Also, be sure to check out our 12.0 release post coming out on the 22nd of this month.\n\nOne of the things you may have noticed is that we frequently add new iterations\non our features, even month to month. We have a lot more iterations planned, both\nfor new and existing features, but what would you like to see in the next\nversion of your favorite feature? We'd love to hear – let us know in the\ncomments below.\n\nPhoto by [Zoltan Tasi](https://unsplash.com/photos/O_mBXldZ0hc?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[894,110,749],{"slug":6282,"featured":6,"template":678},"look-back-on-11-11-cicd","content:en-us:blog:look-back-on-11-11-cicd.yml","Look Back On 11 11 Cicd","en-us/blog/look-back-on-11-11-cicd.yml","en-us/blog/look-back-on-11-11-cicd",{"_path":6288,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6289,"content":6295,"config":6301,"_id":6303,"_type":16,"title":6304,"_source":17,"_file":6305,"_stem":6306,"_extension":20},"/en-us/blog/how-we-migrated-our-markdown-processing-to-commonmark",{"title":6290,"description":6291,"ogTitle":6290,"ogDescription":6291,"noIndex":6,"ogImage":6292,"ogUrl":6293,"ogSiteName":692,"ogType":693,"canonicalUrls":6293,"schema":6294},"How we migrated to CommonMark","A senior backend engineer shares how (and why) we migrated our Markdown processing from RedCarpet to CommonMark.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671172/Blog/Hero%20Images/markdown-tutorial-cover.png","https://about.gitlab.com/blog/how-we-migrated-our-markdown-processing-to-commonmark","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we migrated to CommonMark\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brett Walker\"}],\n        \"datePublished\": \"2019-06-13\",\n      }",{"title":6290,"description":6291,"authors":6296,"heroImage":6292,"date":6298,"body":6299,"category":14,"tags":6300},[6297],"Brett Walker","2019-06-13","\n[Markdown](https://daringfireball.net/projects/markdown/) was originally created by John Gruber as a way of writing readable 
plain text that can be easily converted into HTML or XHTML. Over the years it has become widely adopted for writing online content, whether it's a blog post, a discussion thread, documentation, or even a book.\n\n### Why we moved to CommonMark\n\nVarious \"flavors\" of Markdown have been created, each with their own extensions and idiosyncrasies. [GitHub Flavored Markdown](https://github.github.com/gfm/) is one of the most widely used and adopted sets of extensions (such as task lists). With all the flavors behaving a little differently, it has become increasingly difficult to write Markdown content and have it [properly rendered everywhere](https://babelmark.github.io).\n\n[GitLab uses Markdown extensively](https://docs.gitlab.com/ee/user/markdown.html) – almost all user-generated content is written in it, from issue and merge request descriptions, to comments and discussions, wiki pages, etc.\n\nThe goal of [CommonMark](https://commonmark.org) is to create \"a strongly defined, highly compatible specification of Markdown\":\n\n> We propose a **standard, unambiguous syntax specification for Markdown**, along with a **suite of comprehensive tests** to validate Markdown implementations against this specification. We believe this is necessary, even essential, for the future of Markdown.\n\nBy adopting CommonMark as a standard, we move closer to having Markdown files that are consistently rendered across applications. Ideally, Markdown content should render the same on GitHub, GitLab, or any other Markdown-aware application.\n\nUsers have also opened numerous issues about our Markdown not working as they expected. CommonMark solves most of these problems, and therefore gives our users a more consistent and usable experience.\n\nMany platforms have also migrated to CommonMark, including [GitHub](https://github.com) and [Discourse](https://discourse.org).\n\n### Phases of migration\n\nWe rolled out the migration in phases, wanting to make it as painless as possible. We achieved this with the gracious help of [@blackst0ne](https://gitlab.com/blackst0ne), who started the effort.\n\n#### Phase 1: Only new content\n\nIn the first phase ([GitLab 11.1](/releases/2018/07/22/gitlab-11-1-released/)), we only allowed CommonMark to be used in newly created content, such as issue and merge request descriptions, comments, etc. Any older content, even if edited, would continue to be rendered using RedCarpet.\n\nSince we cache rendered Markdown for performance, we keep a `cached_markdown_version` value in our database. Using this, we were able to determine whether the content should be rendered with CommonMark or not. As the largest version number at the time was 3, we designated version 10 as the start of any CommonMark cached content. Anything less would be considered RedCarpet markdown.\n\nAdditionally, we did not use CommonMark for repository and wiki files. We wanted a [minimal viable change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) to not only test the waters but also minimize any initial problems.\n\n#### Phase 2: Repository and wiki files\n\nThe next step ([GitLab 11.3](/releases/2018/09/22/gitlab-11-3-released/)) was to allow repository and wiki files to be rendered with CommonMark. This was a bigger change in the sense that existing content could potentially look different.\n\nIn practice, RedCarpet and CommonMark are very similar, as would be expected, and are in general compatible. 
There are a few instances where the syntax is different, such as the indentation needed for bulleted lists inside a numbered list, or the use of superscripts, which CommonMark does not support. For most documents, no change is needed.\n\nHowever, we also knew that we couldn't touch user files or do any sort of migration. Instead, we created a [small tool](https://gitlab.com/digitalmoksha/diff_redcarpet_cmark) that can generate the changes necessary for files to be converted. This is done by rendering into HTML using RedCarpet, and then generating CommonMark from it using [Pandoc](https://pandoc.org).\n\n#### Phase 3: CommonMark throughout\n\nThe final phase was to remove RedCarpet completely. By this time, CommonMark had been in use for several months – only older content was still being rendered with RedCarpet. However, we were accumulating technical debt by supporting both methods – new functionality or security fixes had to consider both renderers.\n\nWith RedCarpet removed, we now display older content with CommonMark. Differences are small and only affect a small percentage of the overall content, and the possibility of looking at older issues or merge requests decreases with time.\n\n#### Improvements upstream\n\nThere were a couple of issues we ran into during the implementation that allowed us to drive some changes to the upstream libraries.\n\nThe first is that RedCarpet supports using `~~` to indicate strikethrough. We use [cmark-gfm](https://github.com/github/cmark-gfm) for rendering, giving us both CommonMark and common GitHub Flavored Markdown extensions. And although it supports using any number of tildes for strikethrough, we wanted to make the transition a little easier by [only supporting double tildes](https://github.com/github/cmark-gfm/issues/71). A new option, `CMARK_OPT_STRIKETHROUGH_DOUBLE_TILDE`, was added to [gjtorikian/commonmarker](https://github.com/gjtorikian/commonmarker/commit/1a973ba872e50b22ee53652ffa12cdfe2fe90155), allowing us to turn on support for double-tilde strikethroughs.\n\nSecond, we found a bug in [gjtorikian/commonmarker](https://github.com/gjtorikian/commonmarker/issues/56), where a `\u003Ctbody>` wasn't getting rendered. This was quickly fixed.\n\nMany thanks to [@kivikakk](https://github.com/kivikakk) and [@gjtorikian](https://github.com/gjtorikian) for their help with these issues.\n\n### Conclusion\n\nThe transition took several months, but we're happy to have moved our Markdown processing to the next level. 
And should you run into the few problematic edge cases, [diff_redcarpet_cmark](https://gitlab.com/digitalmoksha/diff_redcarpet_cmark) should be able to help.\n",[1347],{"slug":6302,"featured":6,"template":678},"how-we-migrated-our-markdown-processing-to-commonmark","content:en-us:blog:how-we-migrated-our-markdown-processing-to-commonmark.yml","How We Migrated Our Markdown Processing To Commonmark","en-us/blog/how-we-migrated-our-markdown-processing-to-commonmark.yml","en-us/blog/how-we-migrated-our-markdown-processing-to-commonmark",{"_path":6308,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6309,"content":6315,"config":6320,"_id":6322,"_type":16,"title":6323,"_source":17,"_file":6324,"_stem":6325,"_extension":20},"/en-us/blog/it-automation-developer-productivity",{"title":6310,"description":6311,"ogTitle":6310,"ogDescription":6311,"noIndex":6,"ogImage":6312,"ogUrl":6313,"ogSiteName":692,"ogType":693,"canonicalUrls":6313,"schema":6314},"How IT automation impacts developer productivity","See how IT automation promotes a healthier IT culture and unlocks next-level DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670529/Blog/Hero%20Images/automate-retrospectives.jpg","https://about.gitlab.com/blog/it-automation-developer-productivity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How IT automation impacts developer productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-30\",\n      }",{"title":6310,"description":6311,"authors":6316,"heroImage":6312,"date":6317,"body":6318,"category":14,"tags":6319},[4535],"2019-05-30","\n\nQuestion: If developers spend the bulk of their days on painful, manual tasks, would you say that’s the best use of their time? In a development environment that is always [trying to do more with less](/topics/devops/reduce-devops-costs/), manual processes are productivity killers.\n\nAutomation makes it possible for engineering talent to use their skills on projects that add real business value and contribute to long-term growth. In the world of QA, test automation is creating a modern strategy [focused on excellent user experiences](/blog/trends-in-test-automation/). IT automation makes it possible to deploy applications faster and increase developer productivity, making the DevOps lifecycle more seamless.\n\n\n## The right people doing the right tasks\n\nIT automation ensures businesses have the right people performing the right tasks, and that has some unexpected benefits. Directing developer talent toward strategic initiatives actually creates a healthier DevOps culture. When developers can work on challenges that are more aligned with their role, they’re likely to be happier and more motivated, and that in turn helps with retention. One of the top reasons developers leave is that [they feel unchallenged in their work](https://differential.com/insights/why-software-developers-leave-and-best-ways-to-retain-them/). IT automation lets developers use their skills for the projects where they’re best suited.\n\nThere’s a cost benefit to IT automation, as well. If you have senior engineers working on basic maintenance, [you’re spending too much on maintenance](https://enterprisersproject.com/article/2017/12/5-factors-fueling-automation-it-now), period. Even if you limit these tasks to junior levels, you’re probably still spending too much. 
While there's a lot more to automation than reducing costs, it's an undeniable benefit.\n\nIf it can be automated, it probably should be.\n\n\n## Automating for growth\n\nAs organizations innovate and increase their deployments, they’ll need IT architecture that supports that growth. Could engineers manually develop and configure 50, 100, or even 200 servers? Sure. But what about 1,000 or 2,000? That’s where IT automation becomes a necessity for scalable workloads. Putting special focus on the handoffs between processes (where waste most often occurs) is how leaders can identify the best automation opportunities. [Value stream mapping](https://www.linkedin.com/pulse/automate-question-ricardo-coelho-de-sousa/) is a method used to uncover what should be fully automated, and what may only need partial automation in the interim.\n\nWithout the right IT automation, growth will undoubtedly suffer as teams need more and more staff to keep up with demand. Automation and collaboration are an essential part of operational efficiency, accelerating delivery, and innovating products. CI/CD is the link that connects developers and operations, and that automation helps development teams build better software and vastly improves the handoff process.\n\n\n## Minimizing risk\n\nReducing manual work [minimizes the risk of human error](https://techbeacon.com/devops/how-take-architectural-approach-it-automation), which gives IT the ability to focus on mission-critical tasks rather than cleaning up mistakes. IT automation also adds a system of checks and balances, so if a mistake happens, errors can be rolled back painlessly.\n\nAutomation tools and containers can make security more efficient. [Kubernetes](/solutions/kubernetes/) not only manages container deployments, it can also orchestrate security tasks. “You really want automation, orchestration to help manage which containers should be deployed to which hosts … knowing which containers need to access each other; managing shared resources, and monitoring container health,” says Red Hat security strategist Kirsten Newcomer. “[As you scale up your use of containers and microservices, automation soon becomes a core need](https://enterprisersproject.com/it-automation).”\n\nRemoving the human error component gives developers the peace of mind to work at the pace they want.\n\n\n## Keeping up with innovation\n\nSpeaking of speed – in the (not so distant) past, developers had to write docs and notify teammates about changes in the cloud environment, share content about provisioning and de-provisioning, synchronize problems, and exchange emails. All of that took time. The fewer barriers developers have between code and deployment, the better.\n\n[DevOps tools have created a buffer that allows developers and operations teams to work independently](https://www.infoworld.com/article/3230285/how-devops-changes-dev-and-ops.html). Automation is just a continuation of that DevOps journey – developers can work in real time, and operations teams still procure hardware and manage servers, but at a larger scale. Automation works best when you have specific objectives in mind.\n\nThe team at Monkton had a goal: The moment code is checked in and reviewed, they wanted the testing, deployment, and security vulnerability scanning lifecycles automated. They wanted their people to do what they do best but had a hodgepodge of tools that couldn’t work together. 
They brought in better tools to automate those processes, tied them into GitLab, and now they have the repeatability they need at the speed they want.\n\n[Read their story](/blog/monkton-moves-to-gitlab-customer-story/).\n{: .alert .alert-gitlab-purple .text-center}\n\nIT automation is what makes next-level DevOps possible and gives developers the opportunity to use their skills in ways that add real, long-term value. When organizations automate mundane, manual tasks, they save costs and create a healthy IT culture where developers are challenged and processes are efficient – a real win-win.\n\nAre you ready to explore the benefits of IT automation and increase developer productivity? [Just commit](/blog/application-modernization-best-practices/).\n\nPhoto by [Daniele Levis Pelusi](https://unsplash.com/photos/Pp9qkEV_xPk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/automation?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[894,1646,110],{"slug":6321,"featured":6,"template":678},"it-automation-developer-productivity","content:en-us:blog:it-automation-developer-productivity.yml","It Automation Developer Productivity","en-us/blog/it-automation-developer-productivity.yml","en-us/blog/it-automation-developer-productivity",{"_path":6327,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6328,"content":6334,"config":6340,"_id":6342,"_type":16,"title":6343,"_source":17,"_file":6344,"_stem":6345,"_extension":20},"/en-us/blog/upgrade-to-rails5",{"title":6329,"description":6330,"ogTitle":6329,"ogDescription":6330,"noIndex":6,"ogImage":6331,"ogUrl":6332,"ogSiteName":692,"ogType":693,"canonicalUrls":6332,"schema":6333},"The road to Rails 5","Senior Backend Engineer Jan Provaznik shares some of the challenges we encountered when upgrading GitLab to Rails 5 – and how we overcame them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683399/Blog/Hero%20Images/road-to-rails-5.jpg","https://about.gitlab.com/blog/upgrade-to-rails5","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The road to Rails 5\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jan Provaznik\"}],\n        \"datePublished\": \"2019-05-28\",\n      }",{"title":6329,"description":6330,"authors":6335,"heroImage":6331,"date":6337,"body":6338,"category":14,"tags":6339},[6336],"Jan Provaznik","2019-05-28","\nWith [Rails 6 coming soon](https://weblog.rubyonrails.org/2018/12/20/timeline-for-the-release-of-Rails-6-0/) it's a good time to look back at the journey we took when upgrading GitLab to Rails 5, which was not so long ago.\n\n[Our issue for upgrading to Rails 5](https://gitlab.com/gitlab-org/gitlab-ce/issues/14286) was around for quite a while, largely because it was difficult to switch a project as big as GitLab to the next major version all at once. Here is a brief story about how we solved this upgrade challenge.\n\n## Our solution? Cut it into pieces\n\nThe upgrade to Rails 5 was first prepared as one big merge request. The nice thing about this approach is that when the merge request is ready, you can just merge it without dealing with any backward compatibility. The [first attempt](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5555) had a lower priority and was later replaced with a [second attempt](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12841). 
But for the GitLab codebase this merge request became pretty big: 151 commits, over 120 pushes, and more than 1000 changed files. This made it almost impossible to get such a merge request ready to be merged and to keep it up to date without constantly running into conflicts.\n\nRather than trying to get the upgrade done in a single merge request, we made a couple of changes that made it possible to run the application on either Rails 4 or 5, depending on an environment variable. The application was still running on Rails 4 by default, but we were able to run it on Rails 5 either locally or in CI just by setting the `RAILS5` and `BUNDLE_GEMFILE` environment variables. This allowed us to split the upgrade into many small issues. Typically each issue addressed one specific type of error in CI, so with each fix there were fewer failing tests in CI. Another major benefit was that it became significantly easier to split the work among more people and to keep an overview of who was working on which issue.\n\nA Rails version-specific Gemfile was loaded depending on the `RAILS5` and `BUNDLE_GEMFILE` environment variables. Here is an example of [enabling Rails 5 in rspec](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/18140):\n\n```ruby\ngemfile = %w[1 true].include?(ENV[\"RAILS5\"]) ? \"Gemfile.rails5\" : \"Gemfile\"\nENV['BUNDLE_GEMFILE'] ||= File.expand_path(\"../#{gemfile}\", __dir__)\n```\n\nThe content of [Gemfile.rails5](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/17761):\n\n```ruby\n# BUNDLE_GEMFILE=Gemfile.rails5 bundle install\n\nENV[\"RAILS5\"] = \"true\"\n\ngemfile = File.expand_path(\"../Gemfile\", __FILE__)\n\neval(File.read(gemfile), nil, gemfile)\n```\n\nAnd the Gemfile:\n\n```ruby\ndef rails5?\n  %w[1 true].include?(ENV[\"RAILS5\"])\nend\n\ngem_versions = {}\ngem_versions['activerecord_sane_schema_dumper'] = rails5? ? '1.0'      : '0.2'\ngem_versions['default_value_for']               = rails5? ? '~> 3.0.5' : '~> 3.0.0'\ngem_versions['html-pipeline']                   = rails5? ? '~> 2.6.0' : '~> 1.11.0'\ngem_versions['rails']                           = rails5? ? '5.0.6'    : '4.2.10'\ngem_versions['rails-i18n']                      = rails5? ? '~> 5.1'   : '~> 4.0.9'\n```\n\nThere were situations where a fix for Rails 5 was not compatible with Rails 4 and two different versions of the code were needed, typically for an Active Record query. For this purpose we used a simple helper method `Gitlab.rails5?` to check which version was being used and added code for each version. 
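The helper itself can simply mirror the Gemfile check shown above – a minimal sketch (the real implementation may differ slightly in detail):

```ruby
# Sketch of a version-check helper mirroring the Gemfile logic above;
# it reads the same RAILS5 environment variable.
module Gitlab
  def self.rails5?
    %w[1 true].include?(ENV["RAILS5"])
  end
end
```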
It was pretty easy to remove all Rails 4-compatible code in the cleanup phase when we upgraded to Rails 5, just by searching for `Gitlab.rails5?` in our codebase.\n\nAn example of the check used in `lib/gitlab/database.rb`:\n\n```ruby\ndef self.cached_table_exists?(table_name)\n  if Gitlab.rails5?\n    connection.schema_cache.data_source_exists?(table_name)\n  else\n    connection.schema_cache.table_exists?(table_name)\n  end\nend\n```\n\n## Upgrade process\n\nTo be able to address upgrade issues in small, separate pieces, we took the following steps during the upgrade process:\n\n* [Allowed GitLab to run both with Rails 4 and 5](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/17761), but kept Rails 4 as the default.\n* [Added support for both Rails 4 and 5 in GDK](https://gitlab.com/gitlab-org/gitlab-development-kit/merge_requests/497).\n* Fixed all issues until GitLab fully worked with Rails 5 and CI was green.\n* Did manual testing to make sure everything would work after the upgrade.\n* [Switched to Rails 5 by default](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/21492) (but kept the Rails 4 code).\n* Still enforced compatibility with Rails 4 (by running CI both with Rails 4 and 5) in case we had to switch back because of a blocker issue.\n* Dropped Rails 4 compatibility when we were sure everything worked. Releases are done monthly, so we removed the Rails 4 code after the next release.\n\n## Major challenges\n\n### Active Record changes\n\nIn some places we use `Arel` directly, and there were various incompatible changes (e.g. the [`IN` statement issue](https://github.com/rails/arel/issues/531) solved by [this fix](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/19796)) which caused some of our SQL queries to stop working on Rails 5. (Almost) all of them were discovered during the preparation phase thanks to good test coverage. A list of [database-related changes is here](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests?label_name%5B%5D=rails5&label_name%5B%5D=database&scope=all&state=all).\n\n### Monkey patches\n\nWe keep various monkey patches (either not-yet-merged upstream fixes or custom extensions), many of which required refactoring with the major upgrade. The upside is that we were able to get rid of some of them.\n\n### Keeping Rails 5 CI green\n\nThere was quite a long period between the moment we had all Rails 5 issues fixed and the moment we really switched the master branch to Rails 5.\nDuring this period we used a scheduled pipeline which ran daily on the CE and EE master branches on Rails 5, so we knew quickly when a new incompatibility issue was introduced.\nAnother option was running CI jobs both for Rails 4 and 5 for each merge request and making it mandatory to pass all jobs. The disadvantage of this option was that it would take twice as much time to run CI.\n\nUnfortunately, many new incompatibility issues were introduced during this period. Next time it would be better to run CI for each merge request, both with Rails 4 and 5, even though it would require twice as much CI runtime.\n\n## Production release\n\nOnce we had all known issues in our codebase fixed, we still had additional steps to make sure we didn't hit a critical issue when releasing the next version. [We tracked these steps in this issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/48991). We switched master branches to Rails 5 at the beginning of the development cycle (each cycle is one month long). We then ran CI jobs both with Rails 5 (default) and 4 (to keep backward compatibility). 
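In `.gitlab-ci.yml` terms, covering both versions comes down to running the same job twice with the environment variables described earlier flipped. A purely illustrative sketch, not our actual configuration (job names and the test command are assumptions):

```yaml
# Illustrative only: one test job per Rails version, selected via the
# RAILS5 and BUNDLE_GEMFILE variables described in this post.
rspec-rails4:
  script:
    - bundle exec rspec

rspec-rails5:
  variables:
    RAILS5: "true"
    BUNDLE_GEMFILE: "Gemfile.rails5"
  script:
    - bundle exec rspec
```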
Timing was important because during the development cycle we discovered a couple of issues and we had enough time to fix them before release. After the release of the next version (11.6), when we were sure that we would not have to switch back to Rails 4, we removed Rails 4 both from CI and from the codebase.\n\nAlthough it took longer than expected, I think this upgrade was successful because it didn't cause any production issues. There were a few [major issues](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=closed&label_name[]=rails5&label_name[]=P1) discovered after switching the master branch, but we were able to fix them quickly before release.\n\nThis upgrade was done with huge help from our community – especially [@blackst0ne](https://gitlab.com/blackst0ne) and [@jlemaes](https://gitlab.com/jlemaes). Thank you!\n\n## Next steps\n\n* The upgrade to Rails 5.1 is [happening now](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24852).\n* The upgrade to Rails 5.2 is [still in progress](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/8877) – unfortunately there are many incompatibilities.\n\nBecause the upgrades to 5.1 and 5.2 should be relatively small, we aim to do each upgrade in a single merge request. The upgrade to Rails 6 is expected to be bigger, so the same approach we used for the Rails 5 upgrade will hopefully be useful in this case too.\n\nPhoto by Cody Board on [Unsplash](https://unsplash.com/photos/2hu-SSktidc)\n{: .note}\n",[1347,1286,703,268],{"slug":6341,"featured":6,"template":678},"upgrade-to-rails5","content:en-us:blog:upgrade-to-rails5.yml","Upgrade To Rails5","en-us/blog/upgrade-to-rails5.yml","en-us/blog/upgrade-to-rails5",{"_path":6347,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6348,"content":6354,"config":6359,"_id":6361,"_type":16,"title":6362,"_source":17,"_file":6363,"_stem":6364,"_extension":20},"/en-us/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd",{"title":6349,"description":6350,"ogTitle":6349,"ogDescription":6350,"noIndex":6,"ogImage":6351,"ogUrl":6352,"ogSiteName":692,"ogType":693,"canonicalUrls":6352,"schema":6353},"5 Teams that made the switch to GitLab CI/CD","See what happened when these five teams moved on from old continuous integration and delivery solutions and switched to GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678657/Blog/Hero%20Images/ci-cd-competitive-campaign-blog-cover.png","https://about.gitlab.com/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Teams that made the switch to GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-04-25\",\n      }",{"title":6349,"description":6350,"authors":6355,"heroImage":6351,"date":6356,"body":6357,"category":14,"tags":6358},[4535],"2019-04-25","\nNo team is immune to process challenges, and as organizations grow these challenges only get worse. Sometimes there’s a lack of visibility during the development process, sometimes legacy systems create instability and lack functionality, and sometimes things just _stop working_. Continuous integration and delivery [(CI/CD)](/topics/ci-cd/) enables teams to deploy faster, and finding the right tool can make a big difference in the development lifecycle. 
Great companies know how to identify problems and when it’s time to find better solutions.\n\nWe’ve previously shared [why teams love GitLab CI/CD](/blog/why-gitlab-ci-cd/), and now we want to highlight five real-world examples of teams that abandoned dated continuous integration and delivery solutions and made the switch to GitLab CI/CD. We’ll show you how they:\n\n*   Reduced costs.\n*   Deployed faster.\n*   Improved efficiency.\n*   Made engineers’ lives easier.\n\n### Verizon Connect\n\nThe Verizon Connect Telematics Container Cloud Platform team had several challenges: too many tasks, disjointed processes, and outdated, Java-based monolithic applications. Add tools like [BitBucket](/competition/bitbucket/), Jenkins, and Jira to the mix, and the Verizon Connect team was struggling with _data center builds that took nearly 30 days_. It was time to start from scratch.\n\nThe team chose GitLab to support this infrastructure initiative and reduced data center deploys from 30 days to _under eight hours_.\n\n[Read on](/blog/verizon-customer-story/)\n{: .alert .alert-gitlab-purple}\n\n### Ticketmaster\n\nFor the Ticketmaster mobile team, a two-hour pipeline for a minor change was the last straw. After years with Jenkins and a system weighed down by plugins and legacy development, they knew they needed to reevaluate their continuous integration and delivery tools.\n\nAfter adopting GitLab CI/CD, Ticketmaster was able to move to weekly releases, decreasing their pipeline execution time from two hours to _only eight minutes_ to build, test, and publish artifacts.\n\nLearn how GitLab CI/CD gave the mobile team their Friday afternoons back.\n\n[Read more](/blog/continuous-integration-ticketmaster/)\n{: .alert .alert-gitlab-purple}\n\n### HumanGeo\n\nAs a software development company, HumanGeo ships a lot of code. Development speed is vital, and when Jenkins CI became yet another thing to manage, they needed to make a change.\n\nJustin Shelton, an engineer at HumanGeo, talks about why they decided to switch to GitLab CI/CD, and how they were able to:\n\n*   Cut admin time by 96 percent.\n*   Cut costs by 33 percent.\n*   Increase the pace of development.\n\n[Learn how](/blog/humangeo-switches-jenkins-gitlab-ci/)\n{: .alert .alert-gitlab-purple}\n\n### Wag!\n\nIn three years, Wag! has supported more than one billion walks through its on-demand dog walking, sitting, and boarding mobile app. The engineering team was searching for a simplified solution that would streamline the development process. The company had been using Travis and other continuous integration and delivery systems but wanted something with a better interface that offered more control.\n\nWag!'s infrastructure engineers no longer have to manually stage and test their work. They now use the full GitLab CI/CD pipeline – so whether it's the Android application, the web application, the API, or infrastructure, it's all being tested, built, and deployed through GitLab.\n\n[Check it out](/blog/wag-labs-blog-post/)\n{: .alert .alert-gitlab-purple}\n\n### Paessler AG\n\nPaessler AG’s PRTG Network Monitor is used by enterprises and organizations of all sizes and industries across more than 170 countries. It’s critical that their monitoring service is able to keep up with developments, but stability issues meant that sometimes things just stopped working.\n\nThe Paessler team initially chose GitLab for version control, but after seeing the functionality and potential of GitLab pipelines, they decided to replace Jenkins as well. 
Since adopting GitLab CI/CD, the Paessler AG team now ships 4x more releases and self-serves 90 percent of QA.\n\n[Read the case study](/customers/paessler/)\n{: .alert .alert-gitlab-purple}\n\nWant to know what GitLab CI/CD could do for your team? You’re invited to join us for our CI/CD webcast, _Mastering continuous software development_. Learn how GitLab’s built-in CI/CD helps teams apply continuous software development without all the complicated integrations and plugin maintenance.\n\nIn this webcast, we’ll cover:\n\n* Three main approaches to the continuous software development methodology.\n* The benefits of continuous integration, delivery and deployment practices.\n* A demonstration of GitLab’s CI/CD pipeline to build, test, deploy, and monitor your code.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWatch GitLab's [Mastering continuous software development](/webcast/mastering-ci-cd/) webcast\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n",[916,894,110],{"slug":6360,"featured":6,"template":678},"5-teams-that-made-the-switch-to-gitlab-ci-cd","content:en-us:blog:5-teams-that-made-the-switch-to-gitlab-ci-cd.yml","5 Teams That Made The Switch To Gitlab Ci Cd","en-us/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd.yml","en-us/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd",{"_path":6366,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6367,"content":6373,"config":6378,"_id":6380,"_type":16,"title":6381,"_source":17,"_file":6382,"_stem":6383,"_extension":20},"/en-us/blog/progressive-delivery-using-review-apps",{"title":6368,"description":6369,"ogTitle":6368,"ogDescription":6369,"noIndex":6,"ogImage":6370,"ogUrl":6371,"ogSiteName":692,"ogType":693,"canonicalUrls":6371,"schema":6372},"Progressive Delivery: How to get started with Review Apps","Progressive Delivery is the next evolution of continuous delivery, and Review Apps are a key enabler.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666841/Blog/Hero%20Images/progressive-delivery-review-apps.jpg","https://about.gitlab.com/blog/progressive-delivery-using-review-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Progressive Delivery: How to get started with Review Apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-04-19\",\n      }",{"title":6368,"description":6369,"authors":6374,"heroImage":6370,"date":6375,"body":6376,"category":14,"tags":6377},[4945],"2019-04-19","\nIf you're not familiar with [Progressive Delivery](https://redmonk.com/jgovernor/2018/08/06/towards-progressive-delivery/),\nit's a new set of best practices, now taking hold, for delivering safely and frequently to\nproduction. Although it's not a completely new idea in the same way that continuous\ndelivery originally was, it is a clear evolution of those ideas that brings something\nnew to the table. 
By taking a step back and considering the corpus of knowledge and experience\ngained over the last 10 years, then applying a bit of systems thinking to\nhow all these different practices interact with emerging technologies, Progressive Delivery has set a new\nbaseline for how software delivery can be done effectively.\n\nWe discuss our overall vision for Progressive Delivery on our [CI/CD vision page](/direction/ops/#progressive-delivery),\nwhich also links to a few more resources if you're not up to speed with the concept in general.\n\nIn summary, though, continuous delivery gets you out of the mode of shipping one, big, risky\ndeployment to production, and instead breaks that risk up into many tiny parts – each with a\nfraction of the risk. Progressive Delivery takes this a step further by enabling you to\n[canary test code](https://docs.gitlab.com/ee/user/project/canary_deployments.html) in\nproduction with a small portion of your user base, use [feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html)\nto manage rollout pacing, tie everything together with [tracing](https://docs.gitlab.com/ee/operations/tracing.html),\nand automate the further deployment or rollback of that code depending on how it performs.\n\n## How Review Apps can help enable Progressive Delivery\n\nLet me begin by explaining what GitLab Review Apps are:\n\n[GitLab Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) are\nstaging environments that are automatically created for every branch and/or merge request. They are a collaboration tool\nbuilt into GitLab that helps take the hard work out of providing an environment to\nshowcase or validate product changes. There are a lot of different ways to configure\nthem, but the recommended way is to automatically create review app instances during your\n[merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html). Doing this\nwill ensure that any merge request that is being considered will have an application\nthat developers can connect to in order to validate their changes.\n\nWith GitLab, we go a step beyond simply creating the review environment: we make it accessible.\n\nOnce configured, on your merge request page you'll now see a \"view app\" button that, as long as your\n[route maps](https://docs.gitlab.com/ee/ci/review_apps/#route-maps) are configured correctly, will allow your\nusers to jump right to the changed content. Review apps do work even without the route maps – in that case\nthey will take you to the home page of your app – but with them they almost feel like magic.\n\n![Review app](https://docs.gitlab.com/ee/ci/review_apps/img/review_apps_preview_in_mr.png \"Review app\"){: .shadow.medium.center}\n\nReview apps are a powerful tool on their own for enabling quick iteration, but if we think about\nthem in the context of Progressive Delivery, a whole new set of possibilities opens up.\n\n## Review apps for progressive validation\n\nAs mentioned above, a typical Progressive Delivery flow involves using targeted feature flags to validate\nchanges as they flow to production environments. Review apps, if configured to point to production\ndata/endpoints instead of ephemeral data, can serve as a merge request-based window into the changes\nthat are being considered for release.\n\nSome of this will of course depend on your code, your testing procedures, and your environments. 
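To make this concrete, here is a rough sketch of what a review app job in `.gitlab-ci.yml` could look like – the deploy script and domain are placeholders, not a prescribed setup:

```yaml
# Illustrative sketch of a review app job. deploy-review-app.sh and the
# review domain are hypothetical; substitute whatever your infrastructure uses.
deploy_review:
  stage: deploy
  script:
    - ./deploy-review-app.sh
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: https://$CI_COMMIT_REF_SLUG.review.example.com
  only:
    - merge_requests
```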
You may\npoint review apps at production endpoints from the moment they are spun up, or perhaps only later\nin your merge request pipeline after some initial validation.\n\nSince anyone can use these environments, you can point anyone with a stake in the success of the\nnew feature to the review app, and they are able to see the live behavior, using their own real\ndata, immediately in their own web browser. This is incredibly powerful for enabling rapid feedback\nand iteration. As a preview, we're also looking to improve this capability by adding an\n[easy-to-use review interface for collecting feedback](https://gitlab.com/gitlab-org/gitlab-ee/issues/10761)\nright into review apps directly.\n\n## Feature flags and tracing\n\nWe can take this idea one step further. Using [per-environment feature flag behaviors](https://docs.gitlab.com/ee/operations/feature_flags.html#define-environment-specs), we\ncan control the behavior of the review app environment in any way that the production environment can\nbe controlled. This opens up the possibility of validating any combination of feature flag settings.\n\nFinally, since review apps are built and deployed from GitLab CI/CD, all the predefined CI/CD environment\nvariables are available to the deploy script. You could configure your application to use your\nmerge request ID (`CI_MERGE_REQUEST_ID`) as its unique ID for transaction tracing, tying transactions\nin the system automatically to the appropriate GitLab merge request.\n\n## As you can see, there's a ton of potential for Progressive Delivery here\n\nReview apps don't replace\nthe role of feature flags in a Progressive Delivery pipeline, but they provide an incredible\nsupplement that enables segmented validation in a completely new way. All in all, it's such an exciting time for\ncontinuous delivery – there's so much innovation happening on the process and technology fronts, and I'm\ncertain we're only scratching the surface of where we're headed.\n\nReview Apps is just one way [GitLab CI/CD](/solutions/continuous-integration/) enables Progressive Delivery. 
Join us for our webcast _Mastering continuous software development_ and learn how GitLab’s built-in CI/CD helps teams implement Progressive Delivery workflows, without the complicated integrations and plugin maintenance.\n\n[Watch the GitLab CI/CD webcast](/webcast/mastering-ci-cd/)\n{: .alert .alert-gitlab-purple .text-center}\n\nIf you have more ideas on how to use review apps even more effectively, or where you see the technology\nevolving next, please share in the comments.\n\nPhoto by [Helloquence](https://unsplash.com/photos/5fNmWej4tAA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[110,749,832],{"slug":6379,"featured":6,"template":678},"progressive-delivery-using-review-apps","content:en-us:blog:progressive-delivery-using-review-apps.yml","Progressive Delivery Using Review Apps","en-us/blog/progressive-delivery-using-review-apps.yml","en-us/blog/progressive-delivery-using-review-apps",{"_path":6385,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6386,"content":6392,"config":6398,"_id":6400,"_type":16,"title":6401,"_source":17,"_file":6402,"_stem":6403,"_extension":20},"/en-us/blog/more-performant-and-robust-task-lists-in-gitlab",{"title":6387,"description":6388,"ogTitle":6387,"ogDescription":6388,"noIndex":6,"ogImage":6389,"ogUrl":6390,"ogSiteName":692,"ogType":693,"canonicalUrls":6390,"schema":6391},"How we delivered more performant and robust task lists in GitLab","How simple checkboxes became a challenging engineering problem – and how we fixed it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668319/Blog/Hero%20Images/more-robust-task-lists.jpg","https://about.gitlab.com/blog/more-performant-and-robust-task-lists-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we delivered more performant and robust task lists in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatih Acet\"},{\"@type\":\"Person\",\"name\":\"Brett Walker\"}],\n        \"datePublished\": \"2019-04-05\",\n      }",{"title":6387,"description":6388,"authors":6393,"heroImage":6389,"date":6395,"body":6396,"category":14,"tags":6397},[6394,6297],"Fatih Acet","2019-04-05","\n[GitLab task lists](https://docs.gitlab.com/ee/user/markdown#task-lists) are\na list of checkboxes that you can include anywhere in GitLab where you can have\n[GitLab Flavored Markdown (GFM)](https://docs.gitlab.com/ee/user/markdown#gitlab-flavored-markdown-gfm).\nThis includes issue descriptions and comments, as well as merge requests and epics.\nThey can be used for a list of items to consider when building a feature, tracking\ntasks for new employees to complete when onboarding, or even managing that list\nof materials to purchase for your next home renovation. You can use them as todo\nlists, and so checking off an item should be quick and satisfying.\n\n## More checkboxes, more problems\n\nIn the past, task lists with several items, even dozens, worked fairly well. Check\nan empty checkbox, and a database record gets updated. The checkbox is then displayed\nas checked. Done.\n\nHowever, as the number of items increases and the resulting\nmarkdown becomes longer and more complex, problems begin to appear. For example,\nvisually the checkbox appears checked, but because updating the backend takes\nlonger, if you checked another checkbox, the screen would refresh several seconds\nlater and the checkbox might then be unchecked. 
It soon became next to impossible\nto go down a list and check off items without waiting 10 seconds between each one.\nYet another problem was that if other users were also checking items on the list,\nyour change could be erased by them checking their item – they were overwriting\nyour data.\n\nIn [GitLab 11.8](/releases/2019/02/22/gitlab-11-8-released/#performance-improvements) (released on Feb. 22, 2019),\nwe significantly increased the performance of task lists, as well as making them\nmuch more robust. Here's how we did it:\n\n### Essentially we wanted:\n\n- Checking a checkbox to be as fast as possible.\n- Many users to concurrently interact with checkboxes in the same task list,\nwithout overwriting each other.\n\nBoth the performance and data integrity issues stemmed from the fact that we were\nupdating the complete markdown. This meant that we changed the markdown source in\nthe browser with the updated checkbox, sent it to the backend, where it was saved\nto the database, and then re-rendered so that we could cache the new HTML and send\nit back to the user.\n\n## A scalable solution\n\nBut what if we could update a single checkbox, and send only that to the backend? That\nmight allow multiple users to check off as many tasks as they wanted, without clobbering\neach other. And what if we didn't have to do any markdown rendering at all? We wouldn't\nhave to do any markdown processing, or process embedded issue links, or query if\nlabels have changed, or any of the other advanced things that go on when updating\nan issue. Performance would definitely increase in this case.\n\n### Frontend work\n\nOn the frontend, with only a small modification to the\n[deckar01/task_list](https://github.com/deckar01/task_list/commit/d1c96451df5fb8fdadc2cd080f65ffe2d2076a3a)\ngem we use, we were able to pass the exact text and line number in the markdown source\nfor the clicked task.\nWe then [wrap this piece of information](https://gitlab.com/gitlab-org/gitlab-ce/blob/b4165554113a7f9ce9fecd7d169f9a64686b5c44/app/assets/javascripts/task_list.js#L63-68)\nin a new `update_task` parameter for our update endpoint and send it to the backend.\n\n### Backend work\n\nOn the backend,\n[we needed to verify](https://gitlab.com/gitlab-org/gitlab-ce/blob/b4165554113a7f9ce9fecd7d169f9a64686b5c44/app/services/task_list_toggle_service.rb#L30-51)\nthat the task we were interested in still existed in exactly the same format – the text had to match\nthe exact line number in the source. This meant that even if someone changed text above or below\nthe task item, as long as our line matched exactly, we could update that line in the latest source\nand save it without losing changes.\n\nIn order to update our cached HTML so that we wouldn't have to re-render it, we turned on\nthe `SOURCEPOS` flag of the CommonMark renderer, which adds a `data-sourcepos` attribute to the HTML.\nFor example, a task item's HTML might look like this:\n\n```\n\u003Cli data-sourcepos=\"1:1-1:12\" class=\"task-list-item\">\n  \u003Cinput type=\"checkbox\" class=\"task-list-item-checkbox\" disabled> Task 1\n\u003C/li>\n```\n\nWith a little [Nokogiri](https://nokogiri.org) magic we were able to find the correct line\nand toggle the checked attribute.\n\nSince we updated the cache directly, we completely bypassed any markdown rendering,\nprocessing of special attributes, etc. Performance dramatically increased. 
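As a rough illustration (not our exact production code – the method name and structure are assumptions), the toggle step could look something like this with Nokogiri:

```ruby
require 'nokogiri'

# Toggle the checkbox whose data-sourcepos starts at the given markdown
# source line, directly in the cached HTML. Illustrative sketch only.
def toggle_task_in_cached_html(html, source_line, checked:)
  fragment = Nokogiri::HTML::DocumentFragment.parse(html)

  fragment.css('li.task-list-item').each do |item|
    # data-sourcepos looks like "1:1-1:12"; the first number is the line.
    next unless item['data-sourcepos'].to_s.split(':').first.to_i == source_line

    checkbox = item.at_css('input.task-list-item-checkbox')
    if checked
      checkbox['checked'] = 'checked'
    else
      checkbox.remove_attribute('checked')
    end
  end

  fragment.to_html
end
```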
However,\nsince we couldn't get the request time down to zero, we disabled the checkboxes while the\nrequest was in flight to ensure we weren't getting clicks on other tasks.\n\nThe result: a much more satisfying task list.\n\n[Brett Walker](https://gitlab.com/digitalmoksha) worked on the backend changes and\n[Fatih Acet](https://gitlab.com/fatihacet) worked on the frontend changes in this\nimprovement. See more details in [the GitLab issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/19745).\n\nPhoto by [Glenn Carstens-Peters](https://unsplash.com/photos/RLw-UC03Gwc?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1979,915],{"slug":6399,"featured":6,"template":678},"more-performant-and-robust-task-lists-in-gitlab","content:en-us:blog:more-performant-and-robust-task-lists-in-gitlab.yml","More Performant And Robust Task Lists In Gitlab","en-us/blog/more-performant-and-robust-task-lists-in-gitlab.yml","en-us/blog/more-performant-and-robust-task-lists-in-gitlab",{"_path":6405,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6406,"content":6412,"config":6417,"_id":6419,"_type":16,"title":6420,"_source":17,"_file":6421,"_stem":6422,"_extension":20},"/en-us/blog/application-modernization-best-practices",{"title":6407,"description":6408,"ogTitle":6407,"ogDescription":6408,"noIndex":6,"ogImage":6409,"ogUrl":6410,"ogSiteName":692,"ogType":693,"canonicalUrls":6410,"schema":6411},"7 Best practices for application modernization","Use these best practices to avoid common pitfalls on the application modernization journey.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671258/Blog/Hero%20Images/just-commit-blog-cover.png","https://about.gitlab.com/blog/application-modernization-best-practices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"7 Best practices for application modernization\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-03-27\",\n      }",{"title":6407,"description":6408,"authors":6413,"heroImage":6409,"date":6414,"body":6415,"category":14,"tags":6416},[4535],"2019-03-27","\n\nA journey starts with a single step, any motivational poster can tell you that, but what about all the steps after?\nEven if you know where you're going, are you getting there in the most efficient way possible?\nBefore you start an application modernization quest of your own, it helps to get an idea of the road ahead.\n\nYou don't have to have everything mapped out from the start, and chances are high your plans will change.\nEnterprises can learn a lot from [teams that modernized their legacy systems successfully](/blog/application-modernization-examples/), but there are also valuable lessons from those that failed.\n\n## Why legacy modernization projects fail\n\nEnterprises that dive into the application modernization process are trying to solve big problems, but great intentions rarely guarantee success.\nIn 1999, Carnegie Mellon researchers dove into [10 reasons why legacy re-engineering efforts fail](https://www.cs.cmu.edu/~aldrich/courses/654-sp05/readings/Bergey99.pdf) that are still very relevant today:\n\n1. The organization adopts a flawed strategy from the start.\n2. The organization relies too heavily on outside consultants.\n3. The team is tied down to old technologies and inadequate training.\n4. 
The organization thinks it has its legacy system under control (it doesn't).\n5. The needs of the organization are oversimplified.\n6. The overall software architecture isn't given enough consideration.\n7. There is no defined application modernization process.\n8. Inadequate planning and follow-through.\n9. Lack of long-term commitment to the strategy.\n10. Leaders pre-determine technical decisions.\n\nEvery team faces legacy modernization challenges.\nCommitting to the process is the first step to meeting those challenges head on.\nAs teams go through the modernization journey, use these best practices to avoid common pitfalls and ensure long-term success.\n\n### 1. Create a modernization team\n\nGroups can learn a lot from each other, and a variety of voices at the table can point out weaknesses and improve the modernization process.\nWhen choosing a team or developing an innovation group, avoid thinking along legacy lines that divide teams by stages of the software lifecycle.\n[Think about building a cross-functional team of 8–12 people](/blog/beyond-application-modernization-trends/) who can focus on developing the culture, process, and tools needed to continuously deliver software.\n\n### 2. Disagree, commit, and disagree\n\nWith more voices come more opinions.\nIt's a powerful way to innovate and generate great ideas, but it's also the most effective way to be ineffective.\nDecisions sometimes have to be made without 100 percent buy-in.\nEverything can be questioned, but as long as a decision is in place, we should expect people to commit to executing it.\n\"Disagree and commit\" is [one of GitLab's core values](https://handbook.gitlab.com/handbook/values/#disagree-commit-and-disagree) and it's a common business principle that keeps projects moving forward.\n\nWhether decisions are left to one individual or distributed will largely depend on the size of the organization.\nFor all final decision-makers during the application modernization process: listen to other points of view, thank those who contribute ideas and feedback, consider options carefully, and commit to a course of action.\n\n### 3. Map the development workflow\n\nMany organizations have been bogged down by the sheer number of tools, plug-ins, and platforms they use to accommodate everyday tasks.\nSome workflows have more in common with a Rube Goldberg device than a logical order of operation, but mapping out the development workflow is a necessity when undertaking a legacy modernization project.\nThis step is usually when the headaches of toolchain complexity come to light.\n\nLook at every tool being used across teams and identify dependencies.\nMore handoffs present more opportunities for single points of failure, and any new applications added to the mix need to be able to play well with others.\nEven if you don't mind teams finding their own solutions so that they can work creatively, it's a good rule of thumb to [identify all privately used tools](https://www.pluralsight.com/blog/career/shadow-it-security-threat) that might be in the mix.\n\n### 4. Set small modernization goals\n\nHaving an entire timeline mapped out months in advance sets you up for failure.\nWhy? Projects inevitably change once they get started.\nTrying to map moving targets months in advance is an exercise in futility that ends in projects that are rushed, incomplete, or late anyway. 
Reducing the cycle time and focusing instead on iterating towards smaller goals will have a much higher likelihood of success.\nTeams that master iteration respond to feedback faster, adapt more quickly, and complete their projects faster than their large-scale counterparts.\nBy shortening the timeframe and reducing the scope of each goal, you're able to respond to changing needs, adjust your long-term plans with the feedback you receive along the way, and radically reduce engineering risk.\n\nWhen planning for major milestones (when certain tools will be retired or migrated, when updates will occur, team training, etc.), focus instead on the many small steps between them.\nA smaller deploy introduces fewer changes that could potentially cause issues, helping ensure that each step goes smoothly.\n\n### 5. Prioritize legacy data\n\nPreventing data loss is a key priority during the modernization process.\nEvaluate the data being processed, moved, and stored and put it into categories.\nWhether it's \"high, moderate, or low\" or \"green, yellow, and red,\" make sure the team understands each data category and what safeguards to have in place for each.\n\n### 6. Don't modernize bad habits\n\nMany organizations have squandered a clean slate by infusing new tools with old habits.\nTake a close look at your development workflow and identify instances of duplicated data, manual tasks, inefficiencies, and other habits that could stop your application modernization process in its tracks.\nMany of these practices are due to a lack of training or documentation – both easily fixable problems.\nA new tool doesn't solve bad habits, but bad habits can derail new tools.\n\n### 7. Close the skill gap\n\nThe number of programming languages, tools, systems, and methodologies that developers have to know is immense.\nIt's a challenge for teams to develop the knowledge they need to work quickly, and adding a new system to the mix should be considered carefully.\nKeeping teams in the loop on changes and then dedicating resources to make sure they understand how to navigate the new workflows will be the _most important part of the application modernization process_.\nMake this an ongoing, long-term commitment to organizational success and continue to document best practices long after legacy systems are turned off. Tools are only as good as the people who use them.\n\nAre you ready to tackle application modernization? 
[Just commit.](/blog/application-modernization-best-practices/)\n",[832,894],{"slug":6418,"featured":6,"template":678},"application-modernization-best-practices","content:en-us:blog:application-modernization-best-practices.yml","Application Modernization Best Practices","en-us/blog/application-modernization-best-practices.yml","en-us/blog/application-modernization-best-practices",{"_path":6424,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6425,"content":6431,"config":6438,"_id":6440,"_type":16,"title":6441,"_source":17,"_file":6442,"_stem":6443,"_extension":20},"/en-us/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows",{"title":6426,"description":6427,"ogTitle":6426,"ogDescription":6427,"noIndex":6,"ogImage":6428,"ogUrl":6429,"ogSiteName":692,"ogType":693,"canonicalUrls":6429,"schema":6430},"Streamlining Drupal and WordPress with GitLab and Pantheon","Our guest author, a Developer Programs Engineer at Pantheon, shares how to automate WordPress deployments using GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680516/Blog/Hero%20Images/gitlab-pantheon.png","https://about.gitlab.com/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to connect GitLab and Pantheon to streamline Drupal and WordPress workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Taylor\"}],\n        \"datePublished\": \"2019-03-26\",\n      }",{"title":6432,"description":6427,"authors":6433,"heroImage":6428,"date":6435,"body":6436,"category":14,"tags":6437},"How to connect GitLab and Pantheon to streamline Drupal and WordPress workflows",[6434],"Andrew Taylor","2019-03-26","\n\nAs a member of the developer relations team at [Pantheon](https://pantheon.io), I’m always looking for new ways to help WordPress and Drupal developers solve workflow problems with automation. To this end, I love exploring new tools and how they can be used effectively together.\n\n### One frequent problem I see teams facing is the dreaded single staging server.\n\nIt’s not fun to wait in line for your turn to use the staging server or to send clients a URL and tell them to review some work but ignore other, incomplete pieces.\n\n[Multidev environments](https://pantheon.io/docs/multidev/), one of Pantheon’s advanced developer tools, solves this issue by allowing environments matching Git branches to be created on demand. Each multidev environment has its own URL and database, making independent work, QA, and approval possible without developers stepping on each other's toes.\n\nHowever, Pantheon doesn’t provide source control management (SCM) or continuous integration and continuous deployment (CI/CD) tooling. Instead, the platform is flexible enough to be integrated with your preferred tools.\n\n### The next problem I see consistently is teams using different tools to manage development work and to build and deploy that work.\n\nFor example, using one tool for SCM and something else for CI/CD. Having to jump between tools to edit code and diagnose failing jobs is cumbersome.\n\n[GitLab](/) solves this problem by providing a full suite of development workflow tools, such as SCM, with features like issues and merge requests, best-in-class CI/CD, and a container registry, to name a few. 
I haven't come across another application that manages the development workflow so completely.\n\nAs someone who loves automation, I explored connecting Pantheon to GitLab so that commits to the master branch on GitLab deploy to the main dev environment on Pantheon. Additionally, merge requests on GitLab can create and deploy code to Pantheon multidev environments.\n\nThis tutorial will walk you through setting up the connection between GitLab and Pantheon so you, too, can streamline your WordPress and Drupal workflow.\n\nThis can be done with [GitLab repository mirroring](https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html), but we will be setting it up manually to get some experience with [GitLab CI](https://docs.gitlab.com/ee/ci/) and have the ability to expand beyond just deployment in the future.\n\n## Background\n\nFor this post, you need to know that Pantheon breaks each site down into three components: code, database, and files.\n\nThe code portion of a Pantheon site includes the CMS files, such as WordPress core, plugins, and themes. These files are managed in a [Git repository](https://git-scm.com/book/en/v2/Git-Basics-Getting-a-Git-Repository) hosted by Pantheon, which means we can deploy code from GitLab to Pantheon with Git.\n\nWhen Pantheon refers to files, it means the media files, such as images, for your site. These are typically uploaded by site users and are ignored in Git.\n\nYou can [create a free account](https://pantheon.io/register), learn more about the [Pantheon workflow](https://pantheon.io/docs/pantheon-workflow), or [sign up for a live demo](https://pantheon.io/live-demo) on pantheon.io.\n\n## Assumptions\n\nMy project is named `pantheon-gitlab-blog-demo`, both on Pantheon and GitLab. You should use a unique project name. This tutorial uses a WordPress site. Drupal can be substituted, but some modification will be needed.\n\nI'll also be using the [Git command line](https://git-scm.com/book/en/v2/Getting-Started-The-Command-Line), but you can substitute a [graphical interface](https://git-scm.com/book/en/v2/Appendix-A%3A-Git-in-Other-Environments-Graphical-Interfaces) if you prefer.\n\n## Create the projects\n\nFirst up, create a [new GitLab project](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project) – we'll come back to this in a little bit.\n\nNow, [create a new WordPress site on Pantheon](https://pantheon.io/docs/launch-wordpress/). After your new site is created, you will need to install WordPress from the site dashboard.\n\n_You might be tempted to make some changes, such as adding or removing plugins, but please refrain. We haven't connected the site to GitLab yet and want to make sure all code changes, e.g. adding or removing plugins, go through GitLab._\n\nAfter WordPress is installed, go back to the Pantheon site dashboard and change the development mode to Git.\n\n![Pantheon Dashboard](https://about.gitlab.com/images/blogimages/pantheon-dashboard-after-fresh-wordpress-install.png){: .shadow.medium.center}\n\n## Initial commit to GitLab\n\nNext, we need to get the starting WordPress code from the Pantheon site over to GitLab. In order to do this, we will clone the code from the Pantheon site Git repository locally, then push it to the GitLab repository.\n\nTo make this easier, and more secure, [add an SSH key to Pantheon](https://pantheon.io/docs/ssh-keys/) to avoid entering your password when cloning the Pantheon Git repository. 
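\n\n_If you don't already have an SSH key pair, you can generate one locally first. A minimal sketch (the key type and email comment here are just examples – follow each service's documentation for specifics):_\n\n```\n# Generate a key pair (accept the default path when prompted)\nssh-keygen -t rsa -b 4096 -C \"you@example.com\"\n\n# Print the public key so you can paste it into the SSH key\n# settings pages on Pantheon and GitLab\ncat ~/.ssh/id_rsa.pub\n```\n\n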
While you're at it, [add an SSH key to GitLab](https://docs.gitlab.com/ee/ssh/) as well.\n\nTo do this, clone the Pantheon site locally by copying the command in the Clone with Git drop-down field from the site dashboard.\n\n![Pantheon git connection](https://about.gitlab.com/images/blogimages/pantheon-git-connection-info.png){: .shadow.center}\n\n_If you need help, see the [Pantheon Start With Git](https://pantheon.io/docs/git/#clone-your-site-codebase) documentation._\n\nNext, we want to change the `git remote origin` to point to GitLab, instead of Pantheon. This can be done with the [`git remote` command](https://git-scm.com/docs/git-remote).\n\nHead over to your GitLab project and grab the repository URL, which can be found in the Clone drop-down of the project details screen. Be sure to use the Clone with SSH variant of the GitLab repository URL, since we set up an SSH key earlier.\n\n![Gitlab git connection](https://about.gitlab.com/images/blogimages/gitlab-git-connection-info.png){: .shadow.medium.center}\n\nThe default `git remote` for the local copy of our code repository is `origin`. We can change it with `git remote set-url origin [GitLab repository URL]`, replacing `[GitLab repository URL]` with your actual GitLab repository URL.\n\nFinally, run `git push origin master --force` to send the WordPress code from the Pantheon site to GitLab.\n\n_The `--force` flag is only needed as part of this one-time step. Subsequent `git push` commands to GitLab won't need it._\n\n## Set up credentials and variables\n\nRemember how we added an SSH key locally to authorize with Pantheon and GitLab? Well, an SSH key can also be used to authorize GitLab CI/CD with Pantheon.\n\nGitLab has some great documentation, and we will be looking at the [SSH keys when using the Docker executor section of the Using SSH keys with GitLab CI/CD doc](https://docs.gitlab.com/ee/ci/ssh_keys/#ssh-keys-when-using-the-docker-executor).\n\nAt this point, we will need to do the first two steps: _Create a new SSH key pair locally with ssh-keygen and Add the private key as a variable to your project._\n\nWhen done, `SSH_PRIVATE_KEY` should be set as a [GitLab CI/CD environment variable](https://docs.gitlab.com/ee/ci/variables/) in the project settings.\n\nTo take care of the third and fourth steps, create a `.gitlab-ci.yml` file with the following contents:\n\n```\nbefore_script:\n  # See https://docs.gitlab.com/ee/ci/ssh_keys/\n  - eval $(ssh-agent -s)\n  - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n  - mkdir -p $HOME/.ssh && echo \"StrictHostKeyChecking no\" >> \"$HOME/.ssh/config\"\n  - git config --global user.email \"$GITLAB_USER_EMAIL\"\n  - git config --global user.name \"Gitlab CI\"\n```\n\nDon't commit the `.gitlab-ci.yml` file just yet; we will be adding more to it in the next section.\n\nNow, we need to take care of step 5, _add the public key from the one you created in the first step to the services that you want to have access to from within the build environment._\n\nIn our case, the service we want to access from GitLab is Pantheon. Follow the Pantheon doc to [Add Your SSH Key to Pantheon](https://pantheon.io/docs/ssh-keys/#add-your-ssh-key-to-pantheon) to complete this step.\n\n_Be sure that the private SSH key is in GitLab and the public key is on Pantheon._\n\nWe will also need to set some additional environment variables. The first one should be named `PANTHEON_SITE`, 
and the value will be the *machine name* of your Pantheon site.\n\nYou can get the machine name from the end of the Clone with Git command. Since you already cloned the site locally, it will be the directory name of your local repository.\n\n![wordpress machine name](https://about.gitlab.com/images/blogimages/pantheon-machine-name.png){: .shadow.medium.center}\n\nThe next GitLab CI environment variable to set is `PANTHEON_GIT_URL`, which will be the Git repository URL of the Pantheon site that we used earlier.\n\n_Enter just the SSH repository URL, leaving off `git clone` and the site machine name at the end._\n\nPhew! Now that setup is done, we can move on to finishing our `.gitlab-ci.yml` file.\n\n## Create the deployment job\n\nWhat we will be doing with GitLab CI initially is very similar to what we did with Git repositories earlier. This time though, we will add the Pantheon repository as a second Git remote and then push the code from GitLab to Pantheon.\n\nTo do this, we will set up a [stage](https://docs.gitlab.com/ee/ci/yaml/#stages) named `deploy` and a [job](https://docs.gitlab.com/ee/ci/jobs/) named `deploy:dev`, as it will deploy to the dev environment on Pantheon. The resulting `.gitlab-ci.yml` file should look like this:\n\n```\nstages:\n- deploy\n\nbefore_script:\n  # See https://docs.gitlab.com/ee/ci/ssh_keys/\n  - eval $(ssh-agent -s)\n  - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n  - mkdir -p $HOME/.ssh && echo \"StrictHostKeyChecking no\" >> \"$HOME/.ssh/config\"\n  - git config --global user.email \"$GITLAB_USER_EMAIL\"\n  - git config --global user.name \"Gitlab CI\"\n\ndeploy:dev:\n  stage: deploy\n  environment:\n    name: dev\n    url: https://dev-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    - git remote add pantheon $PANTHEON_GIT_URL\n    - git push pantheon master --force\n  only:\n    - master\n```\n\n`SSH_PRIVATE_KEY`, `PANTHEON_SITE`, and `PANTHEON_GIT_URL` should all look familiar - they are the environment variables we set up earlier. Environment variables allow us to reuse the values multiple times in our `.gitlab-ci.yml` file while keeping one place to update them, should they change in the future.\n\nFinally, add, commit, and push the `.gitlab-ci.yml` file to send it to GitLab.\n\n## Verify the deployment\n\nIf everything was done correctly, the `deploy:dev` job will run on GitLab CI/CD, succeed, and send the `.gitlab-ci.yml` commit to Pantheon. Let's take a look!\n\n![deploy job](https://about.gitlab.com/images/blogimages/gitlab-deploy-dev-job.png){: .shadow.center}\n\n![deploy job passing](https://about.gitlab.com/images/blogimages/gitlab-deploy-dev-job-passed.png){: .shadow.center}\n\n![gitlab commit on pantheon dev](https://about.gitlab.com/images/blogimages/gitlab-commits-on-pantheon-dev.png){: .shadow.center}\n\n## Sending merge request branches to Pantheon\n\nThis next section makes use of my favorite Pantheon feature, [multidev](https://pantheon.io/docs/multidev), which allows you to create additional Pantheon environments on demand, associated with Git branches.\n\nThis section is entirely optional, as [multidev access is restricted](https://pantheon.io/docs/multidev-faq/). However, if you do have multidev access, having GitLab merge requests automatically create multidev environments on Pantheon is a huge workflow improvement.\n\nWe will start by making a new Git branch locally with `git checkout -b multidev-support`. 
Now, let's edit `.gitlab-ci.yml` again.\n\nI like to use the merge request number in the Pantheon environment name. For example, the first merge request would be `mr-1`, the second would be `mr-2`, and so on.\n\nSince the merge request number changes each time, we need to define these Pantheon branch names dynamically. GitLab makes this easy by providing [predefined environment variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html).\n\nWe can use `$CI_MERGE_REQUEST_IID`, which provides the merge request number. Let's put that to use, along with our global environment variables from earlier, and add a new `deploy:multidev` job to the end of our `.gitlab-ci.yml` file.\n\n```\ndeploy:multidev:\n  stage: deploy\n  environment:\n    name: multidev/mr-$CI_MERGE_REQUEST_IID\n    url: https://mr-$CI_MERGE_REQUEST_IID-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    # Checkout the merge request source branch\n    - git checkout $CI_COMMIT_REF_NAME\n    # Add the Pantheon git repository as an additional remote\n    - git remote add pantheon $PANTHEON_GIT_URL\n    # Push the merge request source branch to Pantheon\n    - git push pantheon $CI_COMMIT_REF_NAME:mr-$CI_MERGE_REQUEST_IID --force\n  only:\n    - merge_requests\n```\n\nThis should look very similar to our `deploy:dev` job, only pushing a branch to Pantheon instead of `master`.\n\nAfter you add and commit the updated `.gitlab-ci.yml` file, push this new branch to GitLab with `git push -u origin multidev-support`.\n\nNext, let's create a new merge request from our `multidev-support` branch by following the _Create merge request_ prompt.\n\n![create merge request](https://about.gitlab.com/images/blogimages/gitlab-create-merge-request-prompt.png){: .shadow.medium.center}\n\nAfter creating the merge request, look for the CI/CD job `deploy:multidev` to run.\n\n![multidev deploy success](https://about.gitlab.com/images/blogimages/multidev-branch-deploy-success.png){: .shadow.medium.center}\n\nLook at that – a new branch was sent to Pantheon. However, when we go to the multidev section of the site dashboard on Pantheon, there isn't a new multidev environment.\n\n![multidev branch](https://about.gitlab.com/images/blogimages/pantheon-no-multidev-environments.png){: .shadow.medium.center}\n\nLet's look at the _Git_ Branches section.\n\n![mr branch](https://about.gitlab.com/images/blogimages/pantheon-mr-1-branch.png){: .shadow.medium.center}\n\nOur `mr-1` branch did make it to Pantheon after all. Go ahead and create an environment from the `mr-1` branch.\n\n![create multidev](https://about.gitlab.com/images/blogimages/pantheon-mr-1-multidev-creation.png){: .shadow.medium.center}\n\nOnce the multidev environment has been created, head back to GitLab and look at the _Operations > Environments_ section. You will notice entries for `dev` and `mr-1`.\n\nThis is because we added an `environment` entry with `name` and `url` to our CI/CD jobs. If you click on the open environment icon, you will be taken to the URL for the multidev on Pantheon.\n\n## Automating multidev creation\n\nWe _could_ stop here and try to remember to create a multidev environment each time there is a new merge request, but we can automate that process as well!\n\nPantheon has a command line tool, [Terminus](https://pantheon.io/docs/terminus/), that allows you to interact with the platform in an automated fashion. 
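\n\nIf you'd like to see what Terminus can do before wiring it into CI, a quick local smoke test might look like this (a sketch – it assumes Terminus is installed locally, and uses the `PANTHEON_SITE` value from earlier plus the machine token we'll create in a moment):\n\n```\n# Authenticate with a Pantheon machine token\nterminus auth:login --machine-token=$PANTHEON_MACHINE_TOKEN\n\n# List the environments of a site to confirm everything is connected\nterminus env:list $PANTHEON_SITE\n```\n\n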
Terminus will allow us to provision our multidev environments from the command line – perfect for use in [GitLab CI](https://docs.gitlab.com/ee/ci/).\n\nWe will need a new merge request to test this, so let's create a new branch with `git checkout -b auto-multidev-creation`.\n\nIn order to use Terminus in GitLab CI/CD jobs, we will need a machine token to authenticate with Terminus and a container image with Terminus available.\n\n[Create a Pantheon machine token](https://pantheon.io/docs/machine-tokens/#create-a-machine-token), save it to a safe place, and add it as a global GitLab environment variable named `PANTHEON_MACHINE_TOKEN`.\n\n_If you don't remember how to add GitLab environment variables, scroll up to where we defined `PANTHEON_SITE` earlier in the tutorial._\n\n## Building a Dockerfile with Terminus\n\nIf you don't have Docker or aren't comfortable working with `Dockerfile` files, you can use my image `registry.gitlab.com/ataylorme/pantheon-gitlab-blog-demo:latest` and skip this section.\n\n[GitLab has a container registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html) that allows us to build and host a Docker image for use in our project. Let's create a Dockerfile that has Terminus available, so we can interact with Pantheon.\n\nTerminus is a PHP-based command line tool, so we will start with a PHP image. I prefer to install Terminus via Composer, so I'll be using [the official Docker Composer image](https://hub.docker.com/_/composer) as a base. Create a `Dockerfile` in your local repository directory with the following contents:\n\n```\n# Use the official Composer image as a parent image\nFROM composer:1.8\n\n# Update/upgrade apk\nRUN apk update\nRUN apk upgrade\n\n# Make the Terminus directory\nRUN mkdir -p /usr/local/share/terminus\n\n# Install Terminus 2.x with Composer\nRUN /usr/bin/env COMPOSER_BIN_DIR=/usr/local/bin composer -n --working-dir=/usr/local/share/terminus require pantheon-systems/terminus:\"^2\"\n```\n\nFollow the _Build and push images_ section of the [container registry documentation](https://gitlab.com/help/user/project/container_registry#build-and-push-images) to build an image from the `Dockerfile` and upload it to GitLab.\n\nVisit the _Registry_ section of your GitLab project. If things went according to plan, you will see your image listed. Make a note of the image tag link, as we will need to use that in our `.gitlab-ci.yml` file.\n\n![container registry](https://about.gitlab.com/images/blogimages/gitlab-container-registry.png){: .shadow.center}\n\n
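For reference, the build-and-push sequence from that documentation boils down to a few commands (a sketch using my demo project's registry path – substitute your own project's path):\n\n```\n# Log in to the GitLab Container Registry\ndocker login registry.gitlab.com\n\n# Build the image from the Dockerfile in the repository root\ndocker build -t registry.gitlab.com/ataylorme/pantheon-gitlab-blog-demo .\n\n# Push the image so CI/CD jobs can pull it\ndocker push registry.gitlab.com/ataylorme/pantheon-gitlab-blog-demo\n```\n\n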
The `script` section of our `deploy:multidev` job is starting to get long, so let's move it to a dedicated file. Create a new file `private/multidev-deploy.sh` with the following contents:\n\n```\n#!/bin/bash\n\n# Store the mr- environment name\nexport PANTHEON_ENV=mr-$CI_MERGE_REQUEST_IID\n\n# Authenticate with Terminus\nterminus auth:login --machine-token=$PANTHEON_MACHINE_TOKEN\n\n# Checkout the merge request source branch\ngit checkout $CI_COMMIT_REF_NAME\n\n# Add the Pantheon Git repository as an additional remote\ngit remote add pantheon $PANTHEON_GIT_URL\n\n# Push the merge request source branch to Pantheon\ngit push pantheon $CI_COMMIT_REF_NAME:$PANTHEON_ENV --force\n\n# Create a function for determining if a multidev exists\nTERMINUS_DOES_MULTIDEV_EXIST()\n{\n    # Stash a list of Pantheon multidev environments\n    PANTHEON_MULTIDEV_LIST=\"$(terminus multidev:list ${PANTHEON_SITE} --format=list --field=id)\"\n\n    while read -r multiDev; do\n        if [[ \"${multiDev}\" == \"$1\" ]]\n        then\n            return 0;\n        fi\n    done \u003C\u003C\u003C \"$PANTHEON_MULTIDEV_LIST\"\n\n    return 1;\n}\n\n# If the multidev doesn't exist\nif ! TERMINUS_DOES_MULTIDEV_EXIST $PANTHEON_ENV\nthen\n    # Create it with Terminus\n    echo \"No multidev for $PANTHEON_ENV found, creating one...\"\n    terminus multidev:create $PANTHEON_SITE.dev $PANTHEON_ENV\nelse\n    echo \"The multidev $PANTHEON_ENV already exists, skipping creation...\"\nfi\n```\n\nThe script is in the `private` directory as [it is not web accessible on Pantheon](https://pantheon.io/docs/private-paths/). Now that we have a script for our multidev logic, update the `deploy:multidev` section of `.gitlab-ci.yml` so that it looks like this:\n\n```\ndeploy:multidev:\n  stage: deploy\n  environment:\n    name: multidev/mr-$CI_MERGE_REQUEST_IID\n    url: https://mr-$CI_MERGE_REQUEST_IID-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    # Run the multidev deploy script\n    - \"/bin/bash ./private/multidev-deploy.sh\"\n  only:\n    - merge_requests\n```\n\nIn order to make sure our jobs run with the custom image created earlier, add an `image` definition with the registry URL to `.gitlab-ci.yml`. My complete `.gitlab-ci.yml` file now looks like this:\n\n```\nimage: registry.gitlab.com/ataylorme/pantheon-gitlab-blog-demo:latest\n\nstages:\n- deploy\n\nbefore_script:\n  # See https://docs.gitlab.com/ee/ci/ssh_keys/\n  - eval $(ssh-agent -s)\n  - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n  - mkdir -p $HOME/.ssh && echo \"StrictHostKeyChecking no\" >> \"$HOME/.ssh/config\"\n  - git config --global user.email \"$GITLAB_USER_EMAIL\"\n  - git config --global user.name \"Gitlab CI\"\n\ndeploy:dev:\n  stage: deploy\n  environment:\n    name: dev\n    url: https://dev-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    - git remote add pantheon $PANTHEON_GIT_URL\n    - git push pantheon master --force\n  only:\n    - master\n\ndeploy:multidev:\n  stage: deploy\n  environment:\n    name: multidev/mr-$CI_MERGE_REQUEST_IID\n    url: https://mr-$CI_MERGE_REQUEST_IID-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    # Run the multidev deploy script\n    - \"/bin/bash ./private/multidev-deploy.sh\"\n  only:\n    - merge_requests\n```\n\nAdd, commit, and push `private/multidev-deploy.sh` and `.gitlab-ci.yml`. Now, head back to GitLab and wait for the CI/CD job to finish. The multidev creation takes a few minutes, so be patient.\n\nWhen it is finished, go check out the multidev list on Pantheon. Voila! 
The `mr-2` multidev is there.\n\n![mr-2](https://about.gitlab.com/images/blogimages/pantheon-mr-2-multidev.png){: .shadow.medium.center}\n\n## Conclusion\n\nOpening a merge request and having an environment spin up automatically is a powerful addition to any team's workflow.\n\nBy leveraging the tools offered by both GitLab and Pantheon, we can connect the two in an automated fashion.\n\nSince we used GitLab CI/CD, there is room for growth in our workflow as well. Here are a few ideas to get you started:\n* Add a build step.\n* Add automated testing.\n* Add a job to enforce coding standards.\n* Add [dynamic application security testing](https://docs.gitlab.com/ee/user/application_security/dast/).\n\nDrop me a line with any thoughts you have on GitLab, Pantheon, and automation.\n\nP.S. Did you know Terminus, Pantheon’s command line tool, [is extendable via plugins](https://pantheon.io/docs/terminus/plugins/)?\n\nOver at Pantheon, we have been hard at work on version 2 of our [Terminus Build Tools Plugin](https://github.com/pantheon-systems/terminus-build-tools-plugin/), complete with GitLab support. If you don't want to do all this setup for each project, I encourage you to check it out and help us test the v2 beta. The Terminus `build:project:create` command just needs a Pantheon token and a GitLab token. From there, it will spin up one of our example projects, complete with Composer and automated testing, create a new project on GitLab and a new site on Pantheon, and connect the two by setting up environment variables and SSH keys.\n\n### About the guest author\n\nAndrew Taylor is a Developer Programs Engineer at [Pantheon](https://pantheon.io/).\n",[894,232,268,727],{"slug":6439,"featured":6,"template":678},"connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows","content:en-us:blog:connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows.yml","Connecting Gitlab And Pantheon Streamline Wordpress Drupal Workflows","en-us/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows.yml","en-us/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows",{"_path":6445,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6446,"content":6451,"config":6458,"_id":6460,"_type":16,"title":6461,"_source":17,"_file":6462,"_stem":6463,"_extension":20},"/en-us/blog/enabling-global-search-elasticsearch-gitlab-com",{"title":6447,"description":6448,"ogTitle":6447,"ogDescription":6448,"noIndex":6,"ogImage":6173,"ogUrl":6449,"ogSiteName":692,"ogType":693,"canonicalUrls":6449,"schema":6450},"Lessons from implementing global code search on GitLab.com","Read about some of the dead ends we've encountered on the way to enabling global code search on GitLab.com, and how we're working on a way forward.","https://about.gitlab.com/blog/enabling-global-search-elasticsearch-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Lessons from our journey to enable global code search with Elasticsearch on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mario de la Ossa\"}],\n        \"datePublished\": \"2019-03-20\",\n      }",{"title":6452,"description":6448,"authors":6453,"heroImage":6173,"date":6455,"body":6456,"category":14,"tags":6457},"Lessons from our journey to enable global code search with Elasticsearch on GitLab.com",[6454],"Mario de la Ossa","2019-03-20","\nWe're [working hard to switch our search infrastructure on 
GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/153) to\ntake advantage of our [Elasticsearch integration](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html), which should allow us to improve global search and enable global code search for our users.\n\nEnabling this integration on GitLab.com is important to us because it will unlock better search performance and allow us\nto improve the relevance of results for our GitLab.com users – something our self-managed users have been able to take advantage of for a few years now.\nWe've been working on this for a while, and we've hit many dead ends and pitfalls along the way – maybe you can learn from them too.\n\n## Our plan\n\nWe have two very important things that need to happen: we must reduce the Elasticsearch index size,\nand we must improve the administration of the Elasticsearch integration.\n\n## 1. Reduce index size\n\nCurrently, the Elasticsearch index utilizes approximately 66 percent of the space the repos use.\nThis is our biggest blocker, as this is the bare minimum amount of space required – this number goes up when you consider the need for replicas.\n\nWe've attempted multiple things to get the index size down, but all of them resulted in minimal (or no) change,\nso, given the complexity of implementing them, we've set them aside (at least for now).\n\n### Things we've tried\n\n#### Force merges\n\nWhen you delete a document from Elasticsearch, it doesn't actually free up space right away.\nInstead, it does a soft delete, and Elasticsearch will release the space used in the future via an operation called a [merge](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-merge.html).\n\nIn [gitlab-org/gitlab-ee#7611](https://gitlab.com/gitlab-org/gitlab-ee/issues/7611) we investigated the possibility of forcing Elasticsearch\nto reclaim this space periodically via an operation called a [forcemerge](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html).\nThis seemed like a very worthwhile thing to investigate, as an Elasticsearch index could theoretically grow up to 50 percent more due to these soft deletions.\nIn the end, though, we found out that a `forcemerge` is a blocking call, and causes extreme performance degradation while it runs –\nnot something you want in a production environment!\nSadly, we were forced to abandon this, but we did learn a bit more about [how to tune Elasticsearch so merges are less painful, which we documented here](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html).\n\n#### NGram sizes\n\nIn order to allow users to search without using exact phrases (it would be annoying if a search for \"house\" didn't bring up \"houses\", for\nexample), we use what is called an [Edge NGram](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-edgengram-tokenizer.html)\nfilter for blobs (code files) and SHA1 strings (commit IDs).\n\nWe have our Edge NGram filters set to create a maximum length of 40.\nRight off the bat, we knew we could not lower the maximum size for our SHA1 filter, since we want our users to be able to find commits no matter how many characters of the ID they give us, and the maximum is 40.\n\nWe could, however, play with the Edge NGram filter we use to analyze code, so we tested a few different scenarios in [gitlab-org/gitlab-ee#5585](https://gitlab.com/gitlab-org/gitlab-ee/issues/5585).\nWe came up with conflicting results, but the savings were between 7 and 15 percent. Not bad!\n\n
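To make this concrete, here's roughly what an Edge NGram token filter looks like in an index's settings (a sketch against a toy index – the index name, filter name, and the lowered `max_gram` are illustrative, not our production mapping):\n\n```\ncurl -X PUT \"localhost:9200/blobs-demo\" -H 'Content-Type: application/json' -d '\n{\n  \"settings\": {\n    \"analysis\": {\n      \"filter\": {\n        \"code_edge_ngram\": {\n          \"type\": \"edge_ngram\",\n          \"min_gram\": 1,\n          \"max_gram\": 20\n        }\n      }\n    }\n  }\n}'\n```\n\n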
We still haven't changed the maximum length though, as we still need to confirm that searching is not unduly impacted by such a change.\n\n#### Separate indexes\n\nCurrently, our Elasticsearch integration lumps all document types into the same index.\nThis is because, in order to only return results to which a user has access, we must check the Project the object belongs to for the user's access level, which would be very expensive if we had to check result by result after Elasticsearch returns the results of the query.\n\nThat said, there was a chance that having separate indexes could improve our space usage, and it would definitely improve the re-indexing\nexperience, so in [gitlab-org/gitlab-ee#3217](https://gitlab.com/gitlab-org/gitlab-ee/issues/3217) we took a stab at it.\nWe learned that having separate indexes does nothing for space usage, which we already suspected since Elasticsearch 6.0 shipped with great support for [sparse fields](https://www.elastic.co/blog/minimize-index-storage-size-elasticsearch-6-0).\n\nWe're still looking into having separate indexes, as in testing we have discovered it [greatly improves indexing speed](https://gitlab.com/gitlab-org/gitlab-ee/issues/3217#note_130304358)\nand should also improve the experience of having to re-index certain models.\n\n## 2. Improve administration capabilities for Elasticsearch\n\nRight now, all administration related to Elasticsearch must be done on the Elasticsearch cluster directly.\nWe also currently require the Elasticsearch integration to be an all-or-nothing deal: you must enable it for all projects, or none of them.\nTo make matters worse, when we make a change to the index schema, we require a full re-index of everything right away in order for the update to work.\nWe need to fix all these things and make Elasticsearch easier to administer from within GitLab if we want to have a fighting chance at\nenabling Elasticsearch support on GitLab.com.\n\nSome concrete things we're working on:\n\n### Better cluster visibility\n\nIn order to help the administration of Elasticsearch, we must enable better controls for it from within GitLab.\nIssues [gitlab-org/gitlab-ee#3072](https://gitlab.com/gitlab-org/gitlab-ee/issues/3072) and\n[gitlab-org/gitlab-ee#2973](https://gitlab.com/gitlab-org/gitlab-ee/issues/2973) aim to provide a simple, but functional, admin interface\nfor Elasticsearch within GitLab.\n\n### Graceful recovery\n\nCurrently, if some data fails to index, whether due to a Sidekiq outage or any other reason, the only solution is to\nre-index the full Elasticsearch cluster, which is painful! In [gitlab-org/gitlab-ee#5299](https://gitlab.com/gitlab-org/gitlab-ee/issues/5299)\nwe will be looking into ways to improve this.\n\n### Selective/progressive indexing\n\nIn [gitlab-org/gitlab-ee#3492](https://gitlab.com/gitlab-org/gitlab-ee/issues/3492) we will be taking a look at enabling\nElasticsearch on a project-by-project basis.\n\n### Allow disabling of code indexing\n\nIn [gitlab-org/gitlab-ee#7870](https://gitlab.com/gitlab-org/gitlab-ee/issues/7870) we're investigating making\ncode indexing optional. What this would mean is that global code search would not be available, but searching within a\nproject would work as it currently does, backed by direct Gitaly searches. This is attractive to us as it would bring\nsearch improvements to Projects, Groups, Issues, and Merge Requests. 
This will also be a very useful feature for self-managed\ninstances that want to have better search support for Issues/MRs/etc. but don't really need global code search. Indexing\nthe repos to enable global code search takes an incredible amount of time, so making it optional gives our\nself-managed users more flexibility.\n\n### Shard Elasticsearch per group\n\nIn [gitlab-org/gitlab-ee#10519](https://gitlab.com/gitlab-org/gitlab-ee/issues/10519) we're considering having separate Elasticsearch\nservers per group, similar to how Gitaly works, but at the group level instead of the project level. Elasticsearch servers can become very large,\nwhich reduces performance and makes them less maintainable. By having a separate server per group, we would also gain resiliency in case one\ncluster goes down, as only the group related to that cluster would be affected.\n\nWe're still investigating this approach as there are some concerns about how search would work if we had separate Elasticsearch servers per group.\n\n## The future\n\nWe haven't given up yet! We have high hopes that we'll find ways to lower usage enough to make better search available to all our users.\n\nMeanwhile, we're switching all our engineering time from lowering index usage to improving administration capabilities, as we feel that\nenabling things like selective indexing of projects will allow us to improve our Elasticsearch integration with more confidence, as we will\nbe dogfooding our changes in production.\n\nIf you'd like to follow along with us, feel free to check out the following epics: [gitlab-org&153](https://gitlab.com/groups/gitlab-org/-/epics/153),\n[gitlab-org&429](https://gitlab.com/groups/gitlab-org/-/epics/429), and [gitlab-org&428](https://gitlab.com/groups/gitlab-org/-/epics/428).\nIf you have any concerns, comments, etc., we'll be glad to hear them. Remember, everyone can contribute!\n\nPhoto by [Benjamin Elliott](https://unsplash.com/photos/vc9u77c0LO4) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[749,232,915],{"slug":6459,"featured":6,"template":678},"enabling-global-search-elasticsearch-gitlab-com","content:en-us:blog:enabling-global-search-elasticsearch-gitlab-com.yml","Enabling Global Search Elasticsearch Gitlab Com","en-us/blog/enabling-global-search-elasticsearch-gitlab-com.yml","en-us/blog/enabling-global-search-elasticsearch-gitlab-com",{"_path":6465,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6466,"content":6471,"config":6476,"_id":6478,"_type":16,"title":6479,"_source":17,"_file":6480,"_stem":6481,"_extension":20},"/en-us/blog/application-modernization-examples",{"title":6467,"description":6468,"ogTitle":6467,"ogDescription":6468,"noIndex":6,"ogImage":6409,"ogUrl":6469,"ogSiteName":692,"ogType":693,"canonicalUrls":6469,"schema":6470},"Examples of legacy modernisation projects","Discover how four teams committed to the application modernization process.","https://about.gitlab.com/blog/application-modernization-examples","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Examples of legacy modernisation projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-03-14\",\n      }",{"title":6467,"description":6468,"authors":6472,"heroImage":6409,"date":6473,"body":6474,"category":14,"tags":6475},[4535],"2019-03-14","\n\nFine wine and cheese. Whiskey. Paul Rudd. 
There are a lot of things that get better with age – legacy systems are _not_ one of them.\n\n## The true cost of legacy systems\n\nOver time, the true cost of legacy systems becomes enormous: from the additional resources needed to maintain them, to lost productivity, they can hinder investments in long-term growth. In highly regulated industries, they can even be a financial liability.\n[Health Insurance Portability and Accountability Act (HIPAA) violations in 2018 resulted in over $28 million in fines](https://compliancy-group.com/hipaa-fines-directory-year/), many of them due to data breaches.\nAs legacy systems grow older, it's [easy to miss critical security patches (if any are even available)](https://www.globalscape.com/blog/how-high-risk-legacy-systems-are-hurting-your-business), making your system more vulnerable to malicious actors ready to use old Java and SSL exploits to expose your network.\n\nEven if we can all agree that legacy system modernization is important, it still takes work.\n[Analysis paralysis is a real phenomenon in the digital transformation journey](/blog/beyond-application-modernization-trends/).\nRipping off the band-aid and committing to faster deployment feels overwhelming, and there are so many application modernization trends to consider. But not taking action puts a ceiling on growth.\n\n## Status quo \u003C Innovation\n\nMany large enterprises feel tied down to current practices because there just aren't enough resources left to innovate after maintaining legacy systems.\nFor example, [the greater part of the IT-related federal budget of the United States ($80 billion) goes to maintaining legacy systems.](https://www.spiria.com/en/blog/method-and-best-practices/cost-legacy-systems/)\nWhen large companies can only devote 20 percent of their budget to software modernization, things move even more slowly.\nObsolete systems create a vicious cycle where enterprises feel they have to choose between innovation and keeping things running.\n\nInstead of focusing on a full rip-and-replace of legacy systems, an application modernization strategy that identifies specific challenges reduces potential disruptions.\nSetting goals and achieving them one step at a time can make a big impact.\n\n## How to modernize applications\n\nThese examples of legacy application modernization show how four teams identified challenges, set manageable goals, and decided to [#JustCommit](https://twitter.com/search?q=just+commit) to development efficiency.\n\n### 1. Leveraging microservices\n\nWith a monolithic architecture, everything is developed, deployed, and scaled together.\nWith microservices, each component is broken out and deployed individually as a service, and the services communicate with each other via API calls.\n[Leveraging microservices allows teams to deploy faster and achieve scale, all at a lower cost](/topics/microservices/).\nAsk Media Group recently participated in a webcast where they discussed their transition from monoliths to microservices leveraging containers, Kubernetes, and AWS.\n\n[Watch the webcast](/webcast/cloud-native-transformation/)\n{: .alert .alert-gitlab-purple}\n\n### 2. Improving automation\n\nEquinix, a leading global data center company with more than 180 colocation facilities across five continents, wanted a solution that would help developers code better and faster, to bring customers new features quickly.\nWhile their old system was fine in the beginning, they needed a more robust solution that could meet their enterprise control and scaling needs. 
See how Equinix increased the agility of their developers, without sacrificing quality, through automation.\n{: .alert .alert-gitlab-purple}\n\n### 3. Simplifying the toolchain\n\nGoldman Sachs, one of the largest financial institutions in the world with over $1.5 trillion in assets, had some challenges in their technology division.\nThe technology division is a critical center of the financial provider's business, so speed is essential, but a complex toolchain with too many parts was slowing them down.\nIn order to have faster deployment cycles and increase concurrent development, they knew they needed to simplify their toolchain. One cohesive environment helped them improve visibility and efficiency.\n\n[Read the case study](/customers/goldman-sachs/)\n{: .alert .alert-gitlab-purple}\n\n### 4. Reducing lifecycles\n\nChris Hill, Head of Systems Engineering for Infotainment at Jaguar Land Rover, shared his team's journey from feedback loops of 4-6 weeks to _just 30 minutes_ at the DevOps Enterprise Summit London in 2018.\nWho says you need to be stuck with a traditional release cadence?\n\n[Watch the presentation](/blog/chris-hill-devops-enterprise-summit-talk/)\n{: .alert .alert-gitlab-purple}\n\nAre you ready to tackle application modernization? [Just commit.](/blog/application-modernization-best-practices/)\n",[110,894,873],{"slug":6477,"featured":6,"template":678},"application-modernization-examples","content:en-us:blog:application-modernization-examples.yml","Application Modernization Examples","en-us/blog/application-modernization-examples.yml","en-us/blog/application-modernization-examples",{"_path":6483,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6484,"content":6489,"config":6494,"_id":6496,"_type":16,"title":6497,"_source":17,"_file":6498,"_stem":6499,"_extension":20},"/en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives",{"title":6485,"description":6486,"ogTitle":6485,"ogDescription":6486,"noIndex":6,"ogImage":6312,"ogUrl":6487,"ogSiteName":692,"ogType":693,"canonicalUrls":6487,"schema":6488},"How we use GitLab to automate our monthly retrospectives","How one engineering team is using GitLab CI to automate asynchronous retrospectives, making collaboration across four continents a breeze.","https://about.gitlab.com/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use GitLab to automate our monthly retrospectives\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean McGivern\"}],\n        \"datePublished\": \"2019-03-07\",\n      }",{"title":6485,"description":6486,"authors":6490,"heroImage":6312,"date":6491,"body":6492,"category":14,"tags":6493},[4786],"2019-03-07","\n\nAs an [Engineering\nManager] at GitLab, I spend most of\nmy working day using GitLab for a variety of tasks – from [issue boards](/stages-devops-lifecycle/issueboard/) for team assignments and [epics](https://docs.gitlab.com/ee/user/group/epics/) for tracking longer-term initiatives, to [todos](https://docs.gitlab.com/ee/user/todos.html) and notifications for managing my own workflow.\n\nWe also use GitLab in a number of unconventional ways, so I wanted to share with you one interesting use case we've been experimenting with.\n\n[Engineering Manager]: /handbook/engineering/management/\n\n## GitLab stage group retrospectives\n\nEach [stage group](/stages-devops-lifecycle/) at GitLab has its [own retrospective], which then feeds into the\n[GitLab-wide 
retrospective] we have for each monthly release.\n\n[own retrospective]: /handbook/engineering/management/group-retrospectives/\n[GitLab-wide retrospective]: /handbook/engineering/workflow/#retrospective\n\nThe [Plan team](/handbook/engineering/development/dev/plan/) is fairly widely\ndistributed: we have people on four continents, and only two members of the team\nare even in the same country as each other. We wanted to try [asynchronous\ncommunication] wherever possible, so we used GitLab issues for [our\nretrospectives], too.\n\nA quick note on terminology: we say [team] to refer to a manager – like me – and\ntheir reports. We say [stage group] to refer to the people who work on a\nparticular [DevOps stage], even across multiple teams. The Plan stage group is\neven more widely distributed.\n{: .note}\n\n[team]: /company/team/structure/#team-and-team-members\n[stage group]: /company/team/structure/#stage-groups\n[DevOps stage]: /handbook/product/categories/#devops-stages\n[asynchronous communication]: /handbook/communication#internal-communication\n[our retrospectives]: https://gitlab.com/gl-retrospectives/plan/issues?label_name[]=retrospective\n\n## Automating retrospective issue creation\n\nCreating the retrospective issue was fast, but adding links to notable\nissues that we shipped or that slipped was time-consuming and\ntedious. In the spirit of [xkcd 1319], I decided to automate it, so I\ncreated the [async-retrospectives] project. This project makes\nretrospective issue creation a hands-off process:\n\n[xkcd 1319]: https://xkcd.com/1319/\n[async-retrospectives]: https://gitlab.com/gitlab-org/async-retrospectives\n\n1. It uses [scheduled pipelines] to create an issue on the 1st of each\n   month. As our [development month] runs from the 8th to the 7th, this\n   is a little early, but it allows the team to jot down any thoughts\n   they have while they are still working on the release.\n\n   ![](https://about.gitlab.com/images/blogimages/how-we-used-gitlab-to-automate-our-monthly-retrospectives/scheduled-pipelines.png){: .shadow}\n2. The issue is created using the standard [GitLab API], using a [protected\n   variable] to hold the credentials (a sketch of what this API call looks\n   like appears at the end of this post).\n3. When we create the issue, we use [quick actions] to add the correct\n   labels and due date in a convenient way. (This is also possible\n   without quick actions, but quick actions are more convenient for me\n   personally.)\n4. 
Another scheduled pipeline runs on the 9th of each month to update\n   the existing issue's description with the lists of issues (slipped,\n   shipped) I mentioned above.\n\n   We make our retrospectives public after we conclude them, so you can see this\n   in action on the [11.8 Plan retrospective]:\n\n   [![](https://about.gitlab.com/images/blogimages/how-we-used-gitlab-to-automate-our-monthly-retrospectives/11-8-plan-retrospective.png){: .shadow}][11.8 Plan retrospective]\n\n[scheduled pipelines]: https://docs.gitlab.com/ee/ci/pipelines/schedules.html\n[development month]: /handbook/engineering/workflow/#product-development-timeline\n[GitLab API]: https://docs.gitlab.com/ee/api/\n[protected variable]: https://docs.gitlab.com/ee/ci/variables/#protected-variables\n[quick actions]: https://docs.gitlab.com/ee/user/project/quick_actions.html\n[11.8 Plan retrospective]: https://gitlab.com/gl-retrospectives/plan/issues/22\n\nI only intended this for use in Plan, but a nice thing about a company where we\n[give agency] to people to solve their problems is that people like me are able\nto try out things like this that might not work globally.\n\nAs it happened, it's also been [picked up by other teams and groups]. We\nconfigure the creation in a [YAML file], just like GitLab CI is configured, to\ntry to make it as easy as possible for other managers to contribute and set this\nup for their team.\n\n[give agency]: https://handbook.gitlab.com/handbook/values/#give-agency\n[picked up by other teams and groups]: https://gitlab.com/gitlab-org/async-retrospectives/merge_requests?state=merged\n[YAML file]: https://gitlab.com/gitlab-org/async-retrospectives/blob/master/teams.yml\n\n## Our experience running asynchronous retrospectives\n\n### What works\n\nWe've had a lot of positive experiences from these asynchronous\nretrospectives. In particular:\n\n1. No one is disadvantaged because of their time zone. If we had a video call\n   with our time zone spread, we'd have some people on that call in the middle of\n   their night, or missing out completely.\n2. Because they are written down from the start, and because comments in GitLab\n   are linkable, we can very easily refer to specific points in the future.\n3. Also, because they are written down, the comments can include links to\n   specific issues and merge requests to help other people get the same context.\n\n### What needs improvement\n\nAsynchronous retrospectives aren't perfect, of course. Some of the downsides\nwe've noticed are:\n\n1. Video calls are simply better for some things. In particular, the discussion\n   does not flow as smoothly in text as it can in a verbal conversation.\n\n   We also conduct our [engineering-wide retrospective] in a [public video\n   call], so we retain some opportunity for synchronous discussion.\n2. Similarly, team bonding is slower in text than in video calls.\n3. Participation can be lower if it's something you don't have to do right now,\n   but can always defer to a later date. We are continually [looking for ways to improve\n   this].\n\nOverall, we don't intend to go back to video calls for retrospectives,\nand we're really happy with the results. 
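\n\nAs promised earlier, here's roughly what the automated issue creation boils down to as a single API call (a hedged sketch – the token variable, title, and label are illustrative, not the exact async-retrospectives code):\n\n```\n# Create the retrospective issue via the GitLab API; the quick action in\n# the description applies the label when the issue is created\ncurl --request POST \\\n  --header \"PRIVATE-TOKEN: $RETRO_BOT_TOKEN\" \\\n  --data-urlencode \"title=Retrospective of 11.8\" \\\n  --data-urlencode \"description=/label ~retrospective\" \\\n  \"https://gitlab.com/api/v4/projects/gl-retrospectives%2Fplan/issues\"\n```\n\n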
You can see all public\nretrospectives from the teams and groups at GitLab in the [GitLab\nretrospectives group on GitLab.com].\n\n[engineering-wide retrospective]: https://docs.google.com/document/d/1nEkM_7Dj4bT21GJy0Ut3By76FZqCfLBmFQNVThmW2TY/edit\n[public video call]: /2017/02/14/our-retrospective-and-kickoff-are-public/\n[looking for ways to improve this]: https://gitlab.com/gitlab-org/async-retrospectives/issues/12\n[GitLab retrospectives group on GitLab.com]: https://gitlab.com/gl-retrospectives\n\nPhoto by [Daniele Levis Pelusi](https://unsplash.com/photos/Pp9qkEV_xPk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/automation?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[110,1347,915,727],{"slug":6495,"featured":6,"template":678},"how-we-used-gitlab-to-automate-our-monthly-retrospectives","content:en-us:blog:how-we-used-gitlab-to-automate-our-monthly-retrospectives.yml","How We Used Gitlab To Automate Our Monthly Retrospectives","en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives.yml","en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives",{"_path":6501,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6502,"content":6508,"config":6513,"_id":6515,"_type":16,"title":6516,"_source":17,"_file":6517,"_stem":6518,"_extension":20},"/en-us/blog/ios-publishing-with-gitlab-and-fastlane",{"title":6503,"description":6504,"ogTitle":6503,"ogDescription":6504,"noIndex":6,"ogImage":6505,"ogUrl":6506,"ogSiteName":692,"ogType":693,"canonicalUrls":6506,"schema":6507},"How to publish iOS apps to the App Store with GitLab and fastlane","See how GitLab, together with fastlane, can build, sign, and publish apps for iOS to the App Store.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680470/Blog/Hero%20Images/ios-publishing-cover.jpg","https://about.gitlab.com/blog/ios-publishing-with-gitlab-and-fastlane","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to publish iOS apps to the App Store with GitLab and fastlane\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-03-06\",\n      }",{"title":6503,"description":6504,"authors":6509,"heroImage":6505,"date":6510,"body":6511,"category":14,"tags":6512},[4945],"2019-03-06","\n\n_Note: You may also find the blog post [Tutorial: iOS CI/CD with GitLab](/blog/ios-cicd-with-gitlab/) from June 2023 helpful._\n\nRecently we published a [blog post\ndetailing how to get up and running quickly with your Android app](/blog/android-publishing-with-gitlab-and-fastlane/), GitLab, and\n[_fastlane_](https://fastlane.tools). In this edition, let's look at how to get\na build of an iOS app up and running, including publishing all the way to\nTestFlight. 
To see how cool this can be, check out this [video\nof me making a change on an iPad Pro using the GitLab Web IDE](https://www.youtube.com/watch?v=325FyJt7ZG8), getting that\nbuilt, and then receiving an update to the test version of my application on the\nvery same iPad Pro I was using to develop.\n\nFor the purposes of this article, we'll be using a [simple Swift iOS app](https://gitlab.com/jyavorska/flappyokr)\nthat I recorded the video with.\n\n## First, a note on Apple Store configuration\n\nTo set all of this up, we're going to need a mobile application configured\nin the App Store, distribution certificates, and a provisioning profile that ties\nit all together.\n\nMost of the complexity here actually has to do with setting up your signing\nauthority for the App Store. Hopefully in most cases this is already good to go\nfor you; if you're a new app developer, I'll try to get you started on the right\ntrack, but the intricacies of Apple certificate management are out of the scope of\nthis article, and tend to change somewhat frequently. But this information\nshould get you going.\n\n### My apps\n\nYour application will need to be set up in App Store Connect so you have an ID\nfor your application, which will be used in your `.xcodebuild` configuration.\nYour app profile and ID are what tie together the code builds with pricing and\navailability, as well as TestFlight configuration for distributing testing\napplications to your users. Note that you don't need to set up public testing –\nyou can use personal testing with TestFlight just fine as long as your testing\ngroup is small, and the setup is simpler and requires no additional approvals\nfrom Apple.\n\n### Provisioning profile\n\nIn addition to the app setup, you need iOS distribution and development keys\ncreated in the Certificates, Identifiers, and Profiles section of the Apple\nDeveloper console. Once these certificates are created, you can create a\nprovisioning profile to unify everything.\n\nAlso note that the user you will authenticate with needs to be able to create\ncertificates, so please ensure that they have that ability or you will see an\nerror during the [_cert_ and _sigh_](https://docs.fastlane.tools/codesigning/getting-started/#using-cert-and-sigh)\nsteps.\n\n### Other options\n\nThere are several more ways to set up your certificates and profiles than the\nsimple method I've described above, so if you're doing something different, you may\nneed to adapt. The most important thing is that you need your `.xcodebuild`\nconfiguration to point to the appropriate files, and your keychain needs to be\navailable on the build machine for the user that the runner is running as. We're\nusing _fastlane_ for signing, so if you run into trouble here or want to learn\nmore about your options, take a look at their extensive [code signing documentation](https://docs.fastlane.tools/codesigning/getting-started/).\n\nFor this sample project, I'm using the [_cert_ and _sigh_](https://docs.fastlane.tools/codesigning/getting-started/#using-cert-and-sigh)\napproach, but the [match\napproach](https://docs.fastlane.tools/codesigning/getting-started/#using-match) may be better for actual enterprise use.\n\n## How to set up GitLab and _fastlane_\n\n### How to set up your CI/CD runner\n\nWith the above information gathered or set up, we can start with configuring the\nGitLab runner on a macOS device. Unfortunately, building on macOS is the only\nrealistic way to build iOS apps. 
This is potentially changing in the future;\nkeep an eye on projects like [xcbuild](https://github.com/facebook/xcbuild) and\n[isign](https://github.com/saucelabs/isign), as well as our own internal issue\n[gitlab-ce#57576](https://gitlab.com/gitlab-org/gitlab-ce/issues/57576) for\ndevelopments in this area.\n\nIn the meantime, setting up the runner is fairly straightforward. You can follow\nour most current [instructions for setting up GitLab Runner on macOS](https://docs.gitlab.com/runner/install/osx.html)\nto get that up and running.\n\nNote: Be sure to set your GitLab runner to use the `shell` executor. For building iOS on\nmacOS, it's a requirement to operate directly as the user on the machine rather\nthan using containers. Note that when you're using the shell executor, the\nbuild and tests run as the identity of the runner's logged-in user, directly on\nthe build host. This is less secure than using container executors, so please\ntake a look at our [security implications documentation](https://docs.gitlab.com/runner/security/#usage-of-shell-executor)\nfor additional detail on what to keep in mind in this scenario.\n\n```\nsudo curl --output /usr/local/bin/gitlab-runner https://gitlab-runner-downloads.s3.amazonaws.com/latest/binaries/gitlab-runner-darwin-amd64\nsudo chmod +x /usr/local/bin/gitlab-runner\ncd ~\ngitlab-runner install\ngitlab-runner start\n```\n\nWhat you need to be careful about here is ensuring your Apple keychain is set up\non this host and has access to the keys that Xcode needs in order\nto build. The easiest way to test this is to log in as the user that will be\nrunning the build and try to build manually. You may receive system prompts for\nkeychain access, which you need to \"always allow\" for CI/CD to work. You will probably\nalso want to log in and watch your first pipeline or two to make sure that\nno prompts come up for additional keychain access. Unfortunately, Apple does not\nmake this super easy to use in unattended mode, but once you have it working, it\ntends to stay that way.\n\n### _fastlane_ init\n\nIn order to start using _fastlane_ with your project, you'll need to run\n`fastlane init`. Simply follow the [instructions\nto install and run _fastlane_](https://docs.fastlane.tools/getting-started/ios/setup/), being sure to use the instructions in the\n[Use a Gemfile](https://docs.fastlane.tools/getting-started/ios/setup/#use-a-gemfile)\nsection, since we do want this to run quickly and predictably via unattended CI.\n\nFrom your project directory, you can run the following commands:\n\n```\nxcode-select --install\nsudo gem install fastlane -NV\n# Alternatively using Homebrew\n# brew cask install fastlane\nfastlane init\n```\n\n_fastlane_ will ask you for some basic configuration and then create a project folder\ncalled `fastlane` in your project, which will contain three files:\n\n#### 1. `fastlane/Appfile`\n\nThis file is straightforward, so you just want to check to make sure that the Apple\nID and app ID that you set up earlier are correct.\n\n```\napp_identifier(\"com.vontrance.flappybird\") # The bundle identifier of your app\napple_id(\"your-email@your-domain.com\") # Your Apple email address\n```\n\n#### 2. `fastlane/Fastfile`\n\nThe `Fastfile` defines the build steps. Since we're using a lot of the built-in\ncapability of _fastlane_, this is really straightforward. 
We create a single\nlane which gets certificates, builds, and uploads the new build to TestFlight.\nOf course, you may want to split these out into different jobs depending on your\nuse case. Each of these steps (`get_certificates`, `get_provisioning_profile`,\n`gym`, and `upload_to_testflight`) is a pre-bundled action already included with\n_fastlane_.\n\n`get_certificates` and `get_provisioning_profile` are actions associated with\nthe [_cert_ and _sigh_](https://docs.fastlane.tools/codesigning/getting-started/#using-cert-and-sigh)\napproach to code signing; if you're using _fastlane_ [match](https://docs.fastlane.tools/codesigning/getting-started/#using-match)\nor some other approach, you may need to update these.\n\n```ruby\ndefault_platform(:ios)\n\nplatform :ios do\n  desc \"Build the application\"\n  lane :flappybuild do\n    get_certificates\n    get_provisioning_profile\n    gym\n    upload_to_testflight\n  end\nend\n```\n\n#### 3. `fastlane/Gymfile`\n\nThe `Gymfile` is optional, but I created it manually in order to override the default\noutput directory and place the output in the current folder. This makes things a\nbit easier for CI. You can read more about `gym` and its options in the\n[gym documentation](https://docs.fastlane.tools/actions/gym/).\n\n```ruby\noutput_directory(\"./\")\n```\n\n### Our `.gitlab-ci.yml` configuration file\n\nNow that we have a CI/CD runner associated with our project, we're ready to try a\npipeline. Let's see what's in our `.gitlab-ci.yml` file:\n\n```yaml\nstages:\n  - build\n\nvariables:\n  LC_ALL: \"en_US.UTF-8\"\n  LANG: \"en_US.UTF-8\"\n  GIT_STRATEGY: clone\n\nbuild:\n  stage: build\n  script:\n    - bundle install\n    - bundle exec fastlane flappybuild\n  artifacts:\n    paths:\n    - ./FlappyBird.ipa\n```\n\nYes, that's really it! [We set UTF-8 locale for _fastlane_ per their\nrequirements](https://docs.fastlane.tools/getting-started/ios/setup/#set-up-environment-variables),\nuse a `clone` strategy with the `shell` executor to ensure we have a clean\nworkspace each build, and then simply call our `flappybuild` _fastlane_ target,\nwhich we discussed above. This will build, sign, and deploy the latest build to\nTestFlight.\n\nWe also gather the artifact and save it with the build – note that the `.ipa`\nformat output is a signed ARM executable, so not something you can run in the\nsimulator. If you wanted a simulator output to be saved with the build, you\nwould simply add a build target that produces it and then add it to the artifact\npath.\n\n### Other environment variables\n\nThere are some special environment variables behind the scenes here that are\nmaking this work.\n\n#### `FASTLANE_APPLE_APPLICATION_SPECIFIC_PASSWORD` and `FASTLANE_SESSION`\n\nIn order to upload to TestFlight, _fastlane_ must be able to authenticate\nagainst the App Store. To do this, you need to\ncreate an app-specific password to be used by CI. 
You can read more about this\nprocess in [this documentation](https://docs.fastlane.tools/best-practices/continuous-integration/#use-of-application-specific-passwords-and-spaceauth).\n\nIf you're using two-factor authentication, you'll also need to generate the\n`FASTLANE_SESSION` variable – instructions are in the same place.\n\n#### `FASTLANE_USER` and `FASTLANE_PASSWORD`\n\nIn order for [_cert_ and _sigh_](https://docs.fastlane.tools/codesigning/getting-started/#using-cert-and-sigh)\nto be able to fetch the provisioning profile and certificates on demand, the\n`FASTLANE_USER` and `FASTLANE_PASSWORD` variables must be set. You can read more\nabout this [here](https://docs.fastlane.tools/best-practices/continuous-integration/#environment-variables-to-set).\nYou may not need these if you are using some other approach to signing.\n\n## In closing...\n\nRemember, you can see a working project with all of this set up by heading over\nto my [simple demo app](https://gitlab.com/jyavorska/flappyokr).\n\nHopefully this has been helpful and has inspired you to get iOS builds and\npublishing working within your GitLab project. There is some good additional\n[CI/CD best-practice](https://docs.fastlane.tools/best-practices/continuous-integration/)\ndocumentation for _fastlane_ if you get stuck anywhere,\nand you could also consider using the `CI_BUILD_ID` (which increments each build)\nto [automatically increment a version](https://docs.fastlane.tools/best-practices/continuous-integration/gitlab/#auto-incremented-build-number).\n\nAnother great capability of _fastlane_ to try is the ability to\n[automatically generate screenshots](https://docs.fastlane.tools/getting-started/ios/screenshots/)\nfor the App Store – it's just as easy to set up as the rest of this has been.\n\nWe'd love to hear in the comments how this is working for you, as well as your\nideas for how we can make GitLab a better place to do iOS development in general.\n\nPhoto by eleven_x on [Unsplash](https://unsplash.com/photos/lwaw_DL09S4)\n{: .note}\n",[110,232,749],{"slug":6514,"featured":6,"template":678},"ios-publishing-with-gitlab-and-fastlane","content:en-us:blog:ios-publishing-with-gitlab-and-fastlane.yml","Ios Publishing With Gitlab And Fastlane","en-us/blog/ios-publishing-with-gitlab-and-fastlane.yml","en-us/blog/ios-publishing-with-gitlab-and-fastlane",{"_path":6520,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6521,"content":6526,"config":6532,"_id":6534,"_type":16,"title":6535,"_source":17,"_file":6536,"_stem":6537,"_extension":20},"/en-us/blog/merging-ce-and-ee-codebases",{"title":6522,"description":6523,"ogTitle":6522,"ogDescription":6523,"noIndex":6,"ogImage":6019,"ogUrl":6524,"ogSiteName":692,"ogType":693,"canonicalUrls":6524,"schema":6525},"GitLab might move to a single Rails codebase","We're considering moving towards a single Rails repository by combining the two existing repositories – here's why, and what would change.","https://about.gitlab.com/blog/merging-ce-and-ee-codebases","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab might move to a single Rails codebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marin Jankovski\"}],\n        \"datePublished\": \"2019-02-21\",\n      }",{"title":6522,"description":6523,"authors":6527,"heroImage":6019,"date":6529,"body":6530,"category":14,"tags":6531},[6528],"Marin Jankovski","2019-02-21","\n\n## A single repository with no license changes\n\nBefore we go into 
the details of the proposed changes, we want to stress that:\n\n* GitLab Community Edition code would remain open source and MIT licensed.\n* GitLab Enterprise Edition code would remain source available and proprietary.\n\n## What are the challenges with having two repositories?\n\nCurrently, the Ruby on Rails code of GitLab (the majority of the codebase) is maintained in two repositories:\nthe [gitlab-ce] repository, which holds the code with an open source license, and the [gitlab-ee] repository, which contains code with a proprietary license that is source available.\n\nFeature development at GitLab is difficult and error prone when every change has to be made in two similar yet separate repositories that depend on one another.\n\nBelow are a few examples to demonstrate the problem:\n\n### Duplicated work during feature development\n\nThis [frontend-only merge request](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/7376) required a [backport to the CE repository](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/22158). Backporting meant duplicating work to avoid future conflicts, as well as making changes to the code to support the feature.\n\n### A simple change can break master\n\nA simple [change in a spec in the CE repository](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24801)\nfailed the [pipeline in the master branch](https://gitlab.com/gitlab-org/gitlab-ee/issues/9621). After hours of investigation, an [MR reverting the change](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24961) was created, as well as a [second MR to address the problem](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24983).\n\n### Conflicts during preparation for regular releases\n\nThis concerns preparation for a regular release, e.g. the [11.7.5 release](https://gitlab.com/gitlab-org/release/tasks/issues/659). Merge requests preparing the release for both the [CE repository](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24941) and the [EE repository](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/9441) need to be created, and once the pipelines pass, the EE repository requires a merge from the CE repository. This causes additional conflicts, pipeline failures, and similar delays that require more manual intervention, during which the CE distribution release is also delayed.\n\nBetween these three examples, _days_ of engineering time have been spent on [busy work], delaying the delivery of work that brings actual value. Only three examples are highlighted here, but this type of work occurs daily.\nWhether a new feature is written for Core or for any of the enterprise plans, all are equally affected.\n\nMore details on the workflows and challenges can be found in the [working in CE and EE codebases blueprint] document.\n\n## What have we done to improve the situation?\n\nWe've invested significant development time trying to keep the two repositories in sync:\n\n### Pre-2016: Manual merges for each release\n\nPrior to 2016, merging the CE repository into the EE repository was done when we were ready to cut a release; the number of commits was small, so this could be done by one person.\n\n### 2016-2017: Daily merges by a team of developers\n\nIn 2016, the number of commits between the two repositories grew, so the task was divided among seven (brave) developers responsible for merging the code once a day. 
This worked for a while until delays started happening due to failed specs or difficult merge conflicts.\n\n### 2017-2018: Automated merges every three hours\n\nAt the end of 2017, we merged an [MR that allowed the creation of automated MRs between the two repositories](https://gitlab.com/gitlab-org/release-tools/merge_requests/86), which mentioned the individuals who needed to resolve conflicts. This task ran every three hours, allowing for a smaller number of commits to be worked on. You can read more about our [automated CE to EE merge here](/blog/using-gitlab-ci-to-build-gitlab-faster/).\n\n### Present: Further automation with Merge Train\n\nBy the end of 2018, the number of changes going into both the CE and EE repositories grew to thousands of commits in some cases, which made the automated MR insufficient. The [Merge Train](https://gitlab.com/gitlab-org/merge-train) tool was created to automate these workflows further, by automatically rejecting merge conflicts and preferring changes from one repository over the other. The edge cases we've encountered require us to invest additional time in improving the custom tool.\n\nThis last attempt turned out to be a bit of a crossroads. Do we invest more development time in improving the custom tooling, knowing that we will never get it 100 percent right, or do we take more drastic measures that will save countless hours of development time?\n\n## What are we proposing?\n\nOne of GitLab's core [values] is efficiency. As previously mentioned, merging the [gitlab-ce] Rails repository into the [gitlab-ee] Rails repository is proving to be inefficient.\n\nThe Rails repository is one of the many repositories that make up GitLab. The [gitlab-ce] repository is part of the [gitlab-ce distribution] package, which offers only the Core [feature set]. Similarly, the [gitlab-ee] repository is part of the [gitlab-ee distribution] package, which has a larger feature set available. See the image below:\n\n![CE-EE-Before](https://about.gitlab.com/images/blogimages/merging-ce-and-ee-codebases/community-enterprise-before.png){: .medium.center}\n\nThe change we are proposing would merge the [gitlab-ce] and [gitlab-ee] repositories into a single [gitlab] repository. This change is reflected below:\n\n![CE-EE-After](https://about.gitlab.com/images/blogimages/merging-ce-and-ee-codebases/community-enterprise-after.png){: .medium.center}\n\nThe [design for merging two codebases] outlines the required work and process changes in detail. The proposed change would pertain only to the Ruby on Rails repository, and I've summarized it below.\n\n### So, what changes?\n\n* The [gitlab-ce] and [gitlab-ee] repositories are replaced with a single [gitlab] repository, with all open issues and merge requests moved into the single repository.\n* All frontend assets (JavaScript, CSS, images, views) will be open sourced under the MIT license.\n* All proprietary backend code is located in the `/ee` directory.\n* All documentation is merged together and clearly states which features belong to which [feature set]. 
Documentation is [already licensed under CC-BY-SA](https://gitlab.com/gitlab-org/gitlab-ce/issues/42891).\n\n### What remains unchanged?\n\n* The [gitlab-ce distribution] package remains fully open source under the same license.\n* All code outside of the `/ee` directory in the single [gitlab] repository is open source.\n* All code in the `/ee` directory remains proprietary with source code available.\n* Other projects, such as [gitlab-shell], [gitaly], [gitlab-workhorse], [gitlab-pages], remain unchanged.\n\n### What are the possible downsides?\n\nWe want to be clear about the possible downsides of this approach:\n\n* Users with installations from source currently cloning the [gitlab-ce] repository would download from a new repository named [gitlab]. The clone will also fetch the proprietary code in the `/ee` directory, but removing this directory has no effect on the running application.\n\n     ➡️ This is resolved by removing the `/ee` directory after cloning.\n* [gitlab-ce distribution] users would get more database tables because of the new tables in `db/schema.rb`. The database schema is open source, and in the [gitlab-ce distribution] these new tables would not be populated, would not affect performance, and would not take up significant space.\n\n     ➡️ All database migration code is open source and does not add additional maintenance burden, so no additional work is required.\n\n## What's next?\n\nWe currently think that the efficiency gains and clearer naming outweigh these disadvantages. Our [stewardship of GitLab](/company/stewardship/) is an important aspect of GitLab's success as a whole, so we would love to know:\n\n* Is there a better way to solve the problem of the [busy work]?\n* What improvements can we make to our proposal?\n* Are there any additional considerations that we should take into account?\n\nWe invite you to share your suggestions in [issue 2952](https://gitlab.com/gitlab-org/gitlab-ee/issues/2952), which was an inspiration for the proposal as it currently stands. 
We look forward to hearing your thoughts!\n\nCover image from [Unsplash](https://images.unsplash.com/photo-1512217536414-d92543c79ca1)\n{: .note}\n\n[values]: https://handbook.gitlab.com/handbook/values/\n[gitlab-ce]: https://gitlab.com/gitlab-org/gitlab-ce\n[gitlab-ce distribution]: https://packages.gitlab.com/gitlab/gitlab-ce\n[gitlab-ee distribution]: https://packages.gitlab.com/gitlab/gitlab-ee\n[gitlab-ee]: https://gitlab.com/gitlab-org/gitlab-ee\n[gitlab]: https://gitlab.com/gitlab-org/gitlab\n[gitlab-shell]: https://gitlab.com/gitlab-org/gitlab-shell\n[gitaly]: https://gitlab.com/gitlab-org/gitaly\n[gitlab-workhorse]: https://gitlab.com/gitlab-org/gitlab-workhorse\n[gitlab-pages]: https://gitlab.com/gitlab-org/gitlab-pages\n[feature set]: /pricing/feature-comparison/\n[busy work]: https://en.wikipedia.org/wiki/Busy_work\n[working in CE and EE codebases blueprint]: https://gitlab.com/gitlab-com/gl-infra/readiness/-/tree/master/library/ce-ee-codebases\n[design for merging two codebases]: https://gitlab.com/gitlab-com/gl-infra/readiness/-/tree/master/library/merge-ce-ee-codebases\n",[915,268,278,703],{"slug":6533,"featured":6,"template":678},"merging-ce-and-ee-codebases","content:en-us:blog:merging-ce-and-ee-codebases.yml","Merging Ce And Ee Codebases","en-us/blog/merging-ce-and-ee-codebases.yml","en-us/blog/merging-ce-and-ee-codebases",{"_path":6539,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6540,"content":6546,"config":6551,"_id":6553,"_type":16,"title":6554,"_source":17,"_file":6555,"_stem":6556,"_extension":20},"/en-us/blog/start-using-pages-quickly",{"title":6541,"description":6542,"ogTitle":6541,"ogDescription":6542,"noIndex":6,"ogImage":6543,"ogUrl":6544,"ogSiteName":692,"ogType":693,"canonicalUrls":6544,"schema":6545},"New: How to get up and running quickly using GitLab Pages templates","We're introducing bundled GitLab Pages templates, so let's take a look at how easy it really is now to get up and running with a new site.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679908/Blog/Hero%20Images/pages-templates-cover-image.jpg","https://about.gitlab.com/blog/start-using-pages-quickly","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New: How to get up and running quickly using GitLab Pages templates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-02-20\",\n      }",{"title":6541,"description":6542,"authors":6547,"heroImage":6543,"date":6548,"body":6549,"category":14,"tags":6550},[4945],"2019-02-20","\n\nHello everyone, my name is Jason Yavorska and I'm the product manager for the [Release stage](/stages-devops-lifecycle/release/) here at GitLab, which includes GitLab Pages. In our [GitLab 11.8 release (March 2019) we're introducing](https://gitlab.com/gitlab-org/gitlab-ce/issues/47857) a quick way to select from our most popular [Pages templates](https://gitlab.com/pages?sort=stars_desc) directly from the new project setup screen. If you use GitLab.com, you can take advantage of this feature already! It looks a bit like this:\n\n![Pages Templates View](https://about.gitlab.com/images/blogimages/pages-templates-view.png){: .shadow.medium.center}\n\nNow, instead of having to fork an existing template, you can simply select one of the bundled ones and get going right away. 
If you're interested in one of the other templates, you can still create those in the old way – check out the [existing documentation on how to fork a template](https://docs.gitlab.com/ee/user/project/pages/index.html#fork-a-project-to-get-started-from).\n\nIn this article I'm going to show you just how effortless all of this can be. But first:\n\n## My experience contributing GitLab Pages templates\n\nI'd be remiss if I didn't mention that I contributed this change myself (with the help of a few key supporting players, of course). Now, you may be wondering: I thought you were a product manager at GitLab? Not a developer? Well, that's absolutely true, but I am a hobbyist programmer on the side. I've contributed a small change here or there on my own time, but this was the largest, most complex thing that I've ever contributed myself.\n\nI always find in these situations that contributing is in some ways easier than you expect, and in some ways more challenging. Getting the code working was actually surprisingly straightforward: I was able to get our GDK ([GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit/blob/master/README.md)) up and running with minimal hassle, and then was able to iterate quickly until I found a working solution. Most of my challenges ended up being around getting the change through our review process and into the release. There's a lot you have to learn there, and I think it just takes some time and practice in order to have it all click. What was truly amazing, though, was all the friendly people who jumped in to help me along the way. I learned so much and am so proud of how everything came together in the end.\n\nIf you're considering making your first contribution, feel free to reach out to me on Twitter ([@j4yav](https://twitter.com/j4yav)) and I'll be happy to help guide you in the right direction. Contributing to open source is a great feeling, big or small, and if you haven't tried it before, you should really give it a go.\n\n## Now let's set up a site!\n\nWith that out of the way, let's see this in action to appreciate just how painless it really is to set up a new site in GitLab Pages now.\n\nThe video below walks through the steps, with full instructions underneath.\n\nNote that if you're using a private on-premises version of GitLab, be sure to check with your administrator to ensure that Pages is enabled. You may need to adjust some of the URLs in the setup below depending on your site configuration.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://youtube.com/embed/C2E1M-4Jvd0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### 1. Create the new project\n\nFor this example, we'll use the [Hugo](https://gohugo.io/) template, our most popular one. Simply go to the GitLab home page, and select \"New Project\" from the top right. Click on \"Create from template,\" click on the Hugo template, and then click on \"Use template.\" Give it a name like `namespace.gitlab.io`, where `namespace` is your `username` or `groupname`.\n\n### 2. Run your first pipeline\n\nWe need to make one quick edit, which will kick off a pipeline and deploy our site for the first time: the `config.toml` needs to use the same URL that we set up in the project name. Go to Repository → Files, click on the `config.toml` file, and then click on \"Edit\" in the toolbar. 
All we need to do is change the `baseurl = \"https://pages.gitlab.io/hugo/\"` line to `baseurl = \"https://namespace.gitlab.io/\"` (again, replacing `namespace` with your `username` or `groupname`).\n\nCommit your changes, then head over to CI/CD → Pipelines and look for the new pipeline that's running. You can click on the status to see the build log, or just wait for it to finish – you might be surprised at how fast this is! Once the pipeline passes, we're good to go. It may take a minute or two for everything to work through replication, but once it does, you can see your new site at `https://namespace.gitlab.io/`, beautiful template included, just waiting for you to customize further.\n\n### 3. Where to go next\n\nThere's a lot of basic configuration for your site in the `config.toml`; check it out and see what you might like to modify. The about page is in `/content/page/about.md`, and you can see example posts for your blog in `/content/post` – feel free to delete these when you're done with them. Since these are written in [markdown](https://docs.gitlab.com/ee/user/markdown.html), they're a piece of cake to edit, and adding new ones is just as easy. Getting started with Hugo is a bit out of scope for this post, but I assure you it's quite straightforward. You can check out the [Hugo getting started pages](https://gohugo.io/getting-started/) for more ideas on what you can do. Be sure also to check out [Hugo themes](https://gohugo.io/themes/) if you're looking for inspiration.\n\nHopefully this was helpful in getting you started. Good luck with your new site!\n\nCover image by [José Alejandro Cuffia](https://unsplash.com/@alecuffia) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[2932,749,915,703],{"slug":6552,"featured":6,"template":678},"start-using-pages-quickly","content:en-us:blog:start-using-pages-quickly.yml","Start Using Pages Quickly","en-us/blog/start-using-pages-quickly.yml","en-us/blog/start-using-pages-quickly",{"_path":6558,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6559,"content":6565,"config":6571,"_id":6573,"_type":16,"title":6574,"_source":17,"_file":6575,"_stem":6576,"_extension":20},"/en-us/blog/delayed-replication-for-disaster-recovery-with-postgresql",{"title":6560,"description":6561,"ogTitle":6560,"ogDescription":6561,"noIndex":6,"ogImage":6562,"ogUrl":6563,"ogSiteName":692,"ogType":693,"canonicalUrls":6563,"schema":6564},"How we used delayed replication for disaster recovery with PostgreSQL","Replication is no backup. Or is it? 
Let's take a look at delayed replication and how we used it to recover from accidental label deletion.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683349/Blog/Hero%20Images/mathew-schwartz-397471-unsplash.jpg","https://about.gitlab.com/blog/delayed-replication-for-disaster-recovery-with-postgresql","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used delayed replication for disaster recovery with PostgreSQL\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andreas Brandl\"}],\n        \"datePublished\": \"2019-02-13\",\n      }",{"title":6560,"description":6561,"authors":6566,"heroImage":6562,"date":6568,"body":6569,"category":14,"tags":6570},[6567],"Andreas Brandl","2019-02-13","\nThe [infrastructure team](/handbook/engineering/infrastructure/) at GitLab is responsible for the operation of [GitLab.com](https://gitlab.com/), the largest GitLab instance in existence: With about 3 million users and nearly 7 million projects, it is one of the largest single-tenancy, open source SaaS sites on the internet. The PostgreSQL database system is a critical part of the infrastructure that powers GitLab.com and we employ various strategies to provide resiliency against all kinds of data-loss-inducing disasters. Those are highly unlikely of course, but we are well prepared with backup and replication mechanisms to recover from these scenarios.\n\nIt's a misconception to think of replication as a means to back up a database ([see below](#summing-up)). However, in this post, we're going to explore the power of delayed replication to recover data after an accidental deletion: On [GitLab.com](https://gitlab.com), a user [deleted a label](https://gitlab.com/gitlab-com/gl-infra/production/issues/509) for the [`gitlab-ce`](https://gitlab.com/gitlab-org/gitlab-ce/) project, thereby also losing the label's association with merge requests and issues.\n\nWith a delayed replica in place, we were able to recover and restore that data in under 90 minutes. We'll look into that process and how delayed replication helped to achieve this.\n\n### Point-in-time recovery with PostgreSQL\n\nPostgreSQL comes with a built-in feature to recover the state of a database to a certain point in time. This is called *[Point-in-Time Recovery](https://www.postgresql.org/docs/current/continuous-archiving.html)* (PITR), which leverages the same mechanics that are used to keep a replica up to date: Starting from a consistent snapshot of the whole database cluster (a *basebackup*), we apply the sequence of changes to the database state until a certain point in time has been reached.\n\nIn order to use this feature for a cold backup, we regularly take a basebackup of the database and store this in the *archive* (at GitLab, we keep the archive in [Google Cloud Storage](https://cloud.google.com/storage/)). Additionally, we keep track of changes to the database state by archiving the [*write-ahead log*](https://www.postgresql.org/docs/current/wal-intro.html) (WAL). With that in place, we can perform PITR to recover from a disaster: Start with a snapshot that was taken before the disaster happened and apply changes from the WAL archive until right before the disastrous event.\n\n### What is delayed replication?\n\n*Delayed replication* is the idea of applying time-delayed changes from the WAL. 
That is, a transaction that is committed at physical time `X` is only going to be visible on a replica with delay `d` at time `X + d`.\n\nFor PostgreSQL, there are two ways of setting up a physical replica of the database: *Archive recovery* and *streaming replication*. [Archive recovery](https://www.postgresql.org/docs/11/archive-recovery-settings.html) essentially works like PITR, but runs continuously: we keep retrieving changes from the WAL archive and applying them to the replica state. On the other hand, [streaming replication](https://wiki.postgresql.org/wiki/Streaming_Replication) directly retrieves the WAL stream from an upstream database host. We prefer archive recovery for delayed replication because it is simpler to manage and delivers an adequate level of performance to keep up with the production cluster.\n\n### How to set up delayed archive recovery\n\nConfiguration of [recovery options](https://www.postgresql.org/docs/11/recovery-config.html) mostly goes into `recovery.conf`. Here's an example:\n\n```\nstandby_mode = 'on'\nrestore_command = '/usr/bin/envdir /etc/wal-e.d/env /opt/wal-e/bin/wal-e wal-fetch -p 4 \"%f\" \"%p\"'\nrecovery_min_apply_delay = '8h'\nrecovery_target_timeline = 'latest'\n```\n\nWith these settings in place, we have configured a delayed replica with archive recovery. It uses [wal-e](https://github.com/wal-e/wal-e) to retrieve WAL segments (`restore_command`) from the archive and delays application of changes by eight hours (`recovery_min_apply_delay`). The replica is going to follow any timeline switches present in the archive, e.g. caused by a failover in the cluster (`recovery_target_timeline`).\n\nIt is possible to configure streaming replication with a delay using `recovery_min_apply_delay`. However, there are a few pitfalls regarding replication slots, hot standby feedback, and others that one needs to be aware of. In our case, we avoid them by replicating from the WAL archive instead of using streaming replication.\n\nIt is worth noting that `recovery_min_apply_delay` was only introduced in PostgreSQL 9.4. In earlier versions, a delayed replica was typically implemented with a combination of [recovery management functions](https://www.postgresql.org/docs/9.3/functions-admin.html) (`pg_xlog_replay_pause()`, `pg_xlog_replay_resume()`) or by withholding WAL segments from the archive for the duration of the delay.\n\n### How does PostgreSQL implement it?\n\nIt is particularly interesting to see how PostgreSQL implements delayed recovery. So let's look at [`recoveryApplyDelay(XLogReaderState)`](https://gitlab.com/postgres/postgres/blob/c24dcd0cfd949bdf245814c4c2b3df828ee7db36/src/backend/access/transam/xlog.c#L6124) below. It is called from the [main redo apply loop](https://gitlab.com/postgres/postgres/blob/c24dcd0cfd949bdf245814c4c2b3df828ee7db36/src/backend/access/transam/xlog.c#L7196) for each record read from WAL.\n\n```c\nstatic bool\nrecoveryApplyDelay(XLogReaderState *record)\n{\n\tuint8\t\txact_info;\n\tTimestampTz xtime;\n\tlong\t\tsecs;\n\tint\t\t\tmicrosecs;\n\n\t/* nothing to do if no delay configured */\n\tif (recovery_min_apply_delay \u003C= 0)\n\t\treturn false;\n\n\t/* no delay is applied on a database not yet consistent */\n\tif (!reachedConsistency)\n\t\treturn false;\n\n\t/*\n\t * Is it a COMMIT record?\n\t *\n\t * We deliberately choose not to delay aborts since they have no effect on\n\t * MVCC. 
We already allow replay of records that don't have a timestamp,\n\t * so there is already opportunity for issues caused by early conflicts on\n\t * standbys.\n\t */\n\tif (XLogRecGetRmid(record) != RM_XACT_ID)\n\t\treturn false;\n\n\txact_info = XLogRecGetInfo(record) & XLOG_XACT_OPMASK;\n\n\tif (xact_info != XLOG_XACT_COMMIT &&\n\t\txact_info != XLOG_XACT_COMMIT_PREPARED)\n\t\treturn false;\n\n\tif (!getRecordTimestamp(record, &xtime))\n\t\treturn false;\n\n\trecoveryDelayUntilTime =\n\t\tTimestampTzPlusMilliseconds(xtime, recovery_min_apply_delay);\n\n\t/*\n\t * Exit without arming the latch if it's already past time to apply this\n\t * record\n\t */\n\tTimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime,\n\t\t\t\t\t\t&secs, &microsecs);\n\tif (secs \u003C= 0 && microsecs \u003C= 0)\n\t\treturn false;\n\n\twhile (true)\n\t{\n        // Shortened:\n        // Use WaitLatch until we reached recoveryDelayUntilTime\n        // and then\n        break;\n\t}\n\treturn true;\n}\n```\n\nThe takeaway here is that the delay is based on the physical time that was recorded with the commit timestamp of the transaction (`xtime`). We can also see that the delay is only applied to commit records but not to other types of records: Any data changes are directly applied but the corresponding commit is delayed, so these changes only become visible after the configured delay.\n\n### How to use a delayed replica to recover data\n\nLet's say we have a production database cluster and a replica with eight hours of delay. How do we use this to recover data? Let's look at how this worked in the case of the [accidental label deletion](https://gitlab.com/gitlab-com/gl-infra/production/issues/509).\n\nAs soon as we were aware of the incident, we [paused archive recovery](https://www.postgresql.org/docs/9.3/functions-admin.html) on the delayed replica:\n\n```sql\nSELECT pg_xlog_replay_pause();\n```\n\nPausing the replica eliminated the risk of the replica replaying the `DELETE` query. This is useful if you need more time to investigate.\n\nThe recovery approach is to let the delayed replica catch up until right before the point the `DELETE` query occurred. In our case we knew roughly the physical time of the `DELETE` query. We removed `recovery_min_apply_delay` and added `recovery_target_time` to `recovery.conf`. This effectively lets the replica catch up as fast as possible (no delay) until a certain point in time:\n\n```\nrecovery_target_time = '2018-10-12 09:25:00+00'\n```\n\nWhen operating with physical timestamps, it's worth adding a little margin for error. Obviously, the bigger the margin, the bigger the data loss. On the other hand, if the replica recovers beyond the actual incident timestamp it also replays the `DELETE` query and we would have to start over (or worse: use a cold backup to perform PITR).\n\nAfter restarting the delayed Postgres instance, we saw a lot of WAL segments being replayed until the target transaction time was reached. In order to get a sense of the progress during this phase, we can use this query:\n\n```sql\nSELECT\n  -- current location in WAL\n  pg_last_xlog_replay_location(),\n  -- current transaction timestamp (state of the replica)\n  pg_last_xact_replay_timestamp(),\n  -- current physical time\n  now(),\n  -- the amount of time still to be applied until recovery_target_time has been reached\n  '2018-10-12 09:25:00+00'::timestamptz - pg_last_xact_replay_timestamp() as delay;\n```\n\nWe know recovery is complete when the replay timestamp does not change any more. 
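To avoid re-running that query by hand, a small shell loop can watch the replay timestamp until it stops advancing (a sketch only: the host and database names are assumptions about your setup):\n\n```\n# Poll the delayed replica once a minute; recovery is complete when\n# pg_last_xact_replay_timestamp() stops advancing.\nwhile true; do\n  psql -h delayed-replica -d gitlabhq_production -c \"SELECT pg_last_xact_replay_timestamp(), now();\"\n  sleep 60\ndone\n```\n\n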
We can consider setting a [`recovery_target_action`](https://www.postgresql.org/docs/11/recovery-target-settings.html) in order to shut down, promote, or pause the instance once replay has completed (the default is to pause).\n\nThe database is now in the state preceding the disastrous query. We can start to export data or otherwise make use of the database. In our case, we exported information about the label that was deleted and its association with issues and merge requests and imported that data into our production database. In other cases with more severe data loss, it can be preferable to promote the replica and continue to use it as a primary. However, this means that we lose any data that was entered into the database after the point in time we recovered to.\n\nA more precise alternative to using physical timestamps for targeted recovery is using transaction ids. It is good practice to log transaction ids for DDL statements (like `DROP TABLE`) using `log_statement = 'ddl'`. If we had a transaction id at hand, we could have used `recovery_target_xid` instead in order to replay to the transaction that preceded the `DELETE` query.\n\nFor the delayed replica, the way back to normal is simple: Revert changes to `recovery.conf` and restart Postgres. After a while, the replica is going to show a delay of eight hours again – ready for any future disasters.\n\n### Benefits for recovery\n\nThe major benefit of a delayed replica over a cold backup is that it eliminates the step of restoring a full snapshot from the archive. This can easily take hours, depending on network and storage speeds. In our case, it takes roughly five hours to retrieve the full ~2TB basebackup from the archive. In addition to that, we would have to apply 24 hours' worth of WAL in order to recover to the desired state (in the worst case).\n\nWith a delayed replica in place, we get two benefits over a cold backup:\n\n1. There is no need to retrieve a full basebackup from the archive, and\n2. there is a *fixed* window of eight hours' worth of WAL that needs to be replayed to catch up.\n\nIn addition to that, we continuously test our ability to perform PITR from the WAL archive and would quickly realize WAL archive corruption or other WAL-related problems by monitoring the lag of the delayed replica.\n\nIn our example case, completing recovery took 50 minutes and translated to a recovery rate of 110 GB worth of WAL per hour (the archive was still on [AWS S3](https://aws.amazon.com/s3/) at that time). The incident was mitigated and data recovered and restored 90 minutes after work was started.\n\n### Summing up: Where delayed replication can be useful (and where it's not)\n\nDelayed replication can be used as a first resort to recover from accidental data loss and lends itself perfectly to situations where the loss-inducing event is noticed within the configured delay.\n\nLet's be clear though: *Replication is not a backup mechanism*.\n\nBackup and replication are two mechanisms with distinct purposes: A *cold backup* is useful to recover from a disaster, for example an accidental `DELETE` or `DROP TABLE` event. In this case, we utilize a backup from cold storage to restore an earlier state of a table or the whole database. On the other hand, a `DROP TABLE` query replicates nearly instantly to all replicas in a running cluster – hence normal replication on its own is not useful to recover from this scenario. 
Instead, the purpose of *replication* is mostly to guard database availability against failures of individual database servers and to distribute load.\n\nEven with a delayed replica in place, there are situations where we really want a cold backup that is stored in a safe place: data center failures, silent corruption, and other events that aren't visible right away are prime candidates for relying on cold backups. With replication only, we'd be out of luck.\n\nNote: For [GitLab.com](https://gitlab.com/), we currently only provide system-level resiliency against data loss and do not provide user-level data recovery in general.\n\nPhoto by [Mathew Schwartz](https://unsplash.com/photos/sb7RUrRMaC4?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[915,703],{"slug":6572,"featured":6,"template":678},"delayed-replication-for-disaster-recovery-with-postgresql","content:en-us:blog:delayed-replication-for-disaster-recovery-with-postgresql.yml","Delayed Replication For Disaster Recovery With Postgresql","en-us/blog/delayed-replication-for-disaster-recovery-with-postgresql.yml","en-us/blog/delayed-replication-for-disaster-recovery-with-postgresql",{"_path":6578,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6579,"content":6585,"config":6591,"_id":6593,"_type":16,"title":6594,"_source":17,"_file":6595,"_stem":6596,"_extension":20},"/en-us/blog/start-using-git",{"title":6580,"description":6581,"ogTitle":6580,"ogDescription":6581,"noIndex":6,"ogImage":6582,"ogUrl":6583,"ogSiteName":692,"ogType":693,"canonicalUrls":6583,"schema":6584},"How to tidy up your merge requests with Git","Here's how to use a Git feature that saves a lot of time and cleans up your MRs.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672243/Blog/Hero%20Images/git-tricks-cover-image.png","https://about.gitlab.com/blog/start-using-git","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to tidy up your merge requests with Git\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ronald van Zon\"}],\n        \"datePublished\": \"2019-02-07\",\n      }",{"title":6580,"description":6581,"authors":6586,"heroImage":6582,"date":6588,"body":6589,"category":14,"tags":6590},[6587],"Ronald van Zon","2019-02-07","\n\nI've worked on a lot of open source projects, and one thing they all have in common is\nthat when you create a merge request (or pull request), the maintainers will often ask, \"Can you clean up your request?\"\nbecause commits like *fix typo* should not be included in the Git history.\n\nThere are a few ways of cleaning up commits, and I'll show you the one I've found easiest.\n\nBelow is an example scenario where I use a feature of Git that has saved me a lot of time.\nI have a tiny project, seen in the image below.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_demo_project.png){: .shadow.medium.center}\n\nI like to run my `main.py` in a test environment to see if it works as expected,\nwhich I do by configuring a `.gitlab-ci.yml` to run `main.py`.\nAlthough this is extremely easy, for this example I deliberately made extra commits\nto illustrate my point a bit more clearly. 
So after some time my commit history looks like this:\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_commits_bad.png){: .shadow.medium.center}\n\nHere you can see my first three commits add `README.md`, `main.py` and `.gitlab-ci.yml`.\nA few commits update my `gitlab-ci` file, trying things out and fixing typos.\nThere's also a commit that cleans up my `gitlab-ci` and two more to fix and clean up `main.py`.\n\nNow some of you might see this and think, \"Looks good,\" while others might want to scream at me\nfor making a mess out of my commits.\n\nHow do we fix it?\n\n## How to consolidate your commits\n\nFirst, let's undo the last two commits using [reset](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html#unstage-all-changes-that-have-been-added-to-the-staging-area).\nWe don't want to lose our changes, so we will use `git reset --soft HEAD~2`:\n`--soft` keeps our changes to the files, and `HEAD~2` tells Git to move back two commits from `HEAD`.\n\nThen we create a new commit with `git commit --fixup 6c29979`. This creates a commit called `fixup! Add main Python file`.\nWhen we run `git rebase -i --autosquash 24d214a`, we can see below that our `fixup` commit has been moved below\nthe commit we referenced with the SHA *6c29979*. I could save this, and the fixup would be merged into the commit above it.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_rebase_1.png){: .shadow.medium.center}\n\nBut if we look at the commits below the *fixup*, we see that all the commits are related to the *.gitlab-ci.yml*,\nand by making a small change here, we can clean up my commits in a single go. We will change the *pick* to *fixup* for all\ncommits but `Add default gitlab-ci` (shown in the image below), and we will save this.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_rebase_2.png){: .shadow.medium.center}\n\nChecking our Git log, we see that our long list of commits has been reduced to just three. There is a big change that\nyou should be aware of: because I have just rewritten my Git history, I will have to use `git push --force` to update\nany *remote repository*.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_commits_good.png){: .shadow.medium.center}\n\nThis looks a lot better now; only the relevant commits are left. But could we have prevented this while working on this\nfeature? The answer is yes.\n\nWe could have used `git commit --amend` to fold almost every change into *19d8353 Add default gitlab-ci*.\nThis wouldn't require a new commit for each change that we were making to our `.gitlab-ci.yml` file. We would have ended\nup with the following, and we already know how to handle the *fixup*.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_commits_alternative.png){: .shadow.medium.center}\n\nSomething to keep in mind when using features that rewrite the history of your Git repository: If you already\npushed your previous commits to a *remote repository*, you will have to use `git push --force` to overwrite its\nhistory. 
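One safer habit worth mentioning (my addition, not part of the original workflow): `git push --force-with-lease` refuses to overwrite the remote branch if someone else pushed to it since you last fetched, so you only clobber history you have actually seen:\n\n```\n# Like --force, but aborts if the remote branch has new commits we\n# haven't fetched yet (the branch name here is hypothetical).\ngit push --force-with-lease origin my-feature-branch\n```\n\n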
Bad use of this could cause serious problems, so be careful!\nIf you run into trouble, a useful guide that could help you recover from this is [git push --force and how to deal with it](https://evilmartians.com/chronicles/git-push",[4440,727,702],{"slug":6592,"featured":6,"template":678},"start-using-git","content:en-us:blog:start-using-git.yml","Start Using Git","en-us/blog/start-using-git.yml","en-us/blog/start-using-git",{"_path":6598,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6599,"content":6604,"config":6610,"_id":6612,"_type":16,"title":6613,"_source":17,"_file":6614,"_stem":6615,"_extension":20},"/en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"title":6600,"description":6601,"ogTitle":6600,"ogDescription":6601,"noIndex":6,"ogImage":5327,"ogUrl":6602,"ogSiteName":692,"ogType":693,"canonicalUrls":6602,"schema":6603},"How to set up multi-account AWS SAM deployments with GitLab CI/CD","Our guest author, an AWS Serverless hero, shares how to automate SAM deployments using GitLab CI/CD.","https://about.gitlab.com/blog/multi-account-aws-sam-deployments-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to set up multi-account AWS SAM deployments with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Forrest Brazeal\"}],\n        \"datePublished\": \"2019-02-04\",\n      }",{"title":6600,"description":6601,"authors":6605,"heroImage":5327,"date":6607,"body":6608,"category":14,"tags":6609},[6606],"Forrest Brazeal","2019-02-04","\nI've been working with [serverless](/topics/serverless/) applications in AWS for about three years – that makes me an old salt in serverless terms! So I know that deploying and maintaining a serverless app can be tricky; the tooling often has critical gaps.\n\nAWS's [SAM (Serverless Application Model)](https://aws.amazon.com/serverless/sam/) is an open source framework that makes it easier to define AWS resources – such as Lambda functions, API Gateway APIs and DynamoDB tables – commonly used in serverless applications. Once you lay out your app in a SAM template, the next thing you need is a consistent, repeatable way to get that template off your laptop and deployed in the cloud.\n\nYou need CI/CD.\n\nI've used several different [CI/CD systems](/topics/ci-cd/) to automate SAM deployments, and I always look for the following features:\n\n- A single deployment pipeline that can build once and securely deploy to multiple AWS accounts (dev, staging, prod).\n- Dynamic feature branch deployments, so serverless devs can collaborate in the cloud without stepping on each other.\n- Automated cleanup of feature deployments.\n- Review of our SAM application directly integrated with the CI/CD tool's user interface.\n- Manual confirmation before code is released into production.\n\nIn this post, we'll find out how [GitLab CI](/solutions/continuous-integration/) can check these boxes on its way to delivering effective CI/CD for AWS SAM. You can follow along using [the official example code, available here](https://gitlab.com/gitlab-examples/aws-sam).\n\n## Multi-account AWS deployments\n\nWe'll want to set up our deployment pipeline across multiple AWS accounts, because accounts are the only true security boundary in AWS. We don't want to run any risk of deploying prod data in dev, or vice versa. 
Our multi-account setup will look something like this:\n\nAny time we work with multiple AWS accounts, we need cross-account [IAM roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) in order to authorize deployments. We'll handle this task through the following steps. (All referenced scripts are available in the [example repo](https://gitlab.com/gitlab-examples/aws-sam).)\n\n### 1\\. Establish three AWS accounts for development, staging, and production deployments\n\nYou can use existing AWS accounts if you have them, or [provision new ones under an AWS Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html).\n\n### 2\\. Set up GitLab IAM roles in each account\n\nRun the following AWS CLI call with admin credentials in each of the three accounts:\n\n```\naws cloudformation deploy --stack-name GitLabCIRoles --template-file setup-templates/roles.yml --capabilities CAPABILITY_NAMED_IAM --parameter-overrides CIAccountID=\"\u003CAWS Account ID where your GitLab CI/CD runner lives>\" CIAccountSTSCondition=\"\u003CThe aws:userid for the IAM principal used by the GitLab runner>\"\n```\n\nReplace `CIAccountID` and `CIAccountSTSCondition` as indicated with values from the AWS account where your GitLab CI/CD runner exists. (Need help finding the `aws:userid` for your runner’s IAM principal? Check out [this guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable).)\n\nThis CloudFormation template defines two roles: `SharedServiceRole` and `SharedDeploymentRole`. The `SharedServiceRole` is assumed by the GitLab CI/CD runner when calling the AWS CloudFormation service. This role trusts the GitLab CI/CD runner's role. It has permissions to call the CloudFormation service, pass a role via IAM, and access S3 and CloudFront: nothing else. This role is not privileged enough to do arbitrary AWS deployments on its own.\n\nThe `SharedDeploymentRole`, on the other hand, has full administrative access to perform any AWS action. As such, it cannot be assumed directly by the GitLab CI/CD runner. Instead, this role must be \"passed\" to CloudFormation using the service's `RoleArn` parameter. The CloudFormation service trusts the `SharedDeploymentRole` and can use it to deploy whatever resources are needed as part of the pipeline.\n\n### 3\\. Create an S3 bucket for CI artifacts\n\nGrab the AWS account ID for each of your development, staging, and production accounts, then deploy this CloudFormation template **in the account where your GitLab CI/CD Runner exists**:\n\n```\naws cloudformation deploy --stack-name GitLabCIBucket --template-file setup-templates/ci-bucket.yml --parameter-overrides DevAwsAccountId=\"\u003CAWS Account ID for dev>\" StagingAwsAccountId=\"\u003CAWS Account ID for staging>\" ProdAwsAccountId=\"\u003CAWS Account ID for prod>\" ArtifactBucketName=\"\u003CA unique name for your bucket>\"\n```\n\nThis CloudFormation template creates a centralized S3 bucket which holds the artifacts created during your pipeline run. Artifacts are created once for each branch push and reused between staging and production. The bucket policy allows the development, staging, and production accounts to reference the same artifacts when deploying CloudFormation stacks – checking off our \"build once, deploy many\" requirement.\n\n### 4\\. 
Assume the `SharedServiceRole` before making any cross-account AWS calls\n\nWe have provided the script `assume-role.sh`, which will assume the provided role and export temporary AWS credentials to the current shell. It is sourced in the various `.gitlab-ci.yml` build scripts.\n\n## Single deployment pipeline\n\nThat brings us to the `.gitlab-ci.yml` file you can see at the root of our example repository. GitLab CI/CD is smart enough to dynamically create and execute the pipeline based on that template when we push code to GitLab. The file has a number of variables at the top that you can tweak based on your environment specifics.\n\n### Stages\n\nOur GitLab CI/CD pipeline contains seven possible stages, defined as follows:\n\n![Multi-account AWS SAM deployment model with GitLab CI](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-model.png){: .shadow.medium.center}\n\n```yaml\nstages:\n - test\n - build-dev\n - deploy-dev\n - build-staging\n - deploy-staging\n - create-change-prod\n - execute-change-prod\n```\n\n![Deployment lifecycle stages](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-lifecycle-stages.png){: .shadow.medium.center}\n\n\"Stages\" are used as a control flow mechanism when building the pipeline. Multiple build jobs within a stage will run in parallel, but all jobs in a given stage must complete before any jobs belonging to the next stage in the list can be executed.\n\nAlthough seven stages are defined here, only certain ones will execute, depending on what kind of Git action triggered our pipeline. We effectively have three phases to any deployment: a \"test\" phase where we run unit tests and dependency scans against our code, a \"build\" phase that packages our SAM template, and a \"deploy\" phase split into two parts: creating a [CloudFormation change set](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html) and then executing that change set in the target environment.\n\n#### Test\n\nOur `.gitlab-ci.yml` file currently runs two types of tests: unit tests against our code, and dependency scans against our third-party Python packages.\n\n##### Unit tests\n\nUnit tests run on every branch pushed to the remote repository. This behavior is defined by the `only: branches` property in the job shown below:\n\n```yaml\ntest:unit:\n stage: test\n only:\n   - branches\n script: |\n   if test -f requirements.txt; then\n       pip install -r requirements.txt\n   fi\n   python -m pytest --ignore=functions/\n```\n\nEvery GitLab CI/CD job runs a script. Here, we install any dependencies, then execute Python unit tests.\n\n##### Dependency scans\n\n[Dependency scans](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), which can take a few minutes, run only on code pushed to the master branch; it would be counterproductive for developers to wait on them every time they want to test code.\n\nThese scans use a hardcoded, standard Docker image to mount the code and run \"Docker in Docker\" checks against a database of known package vulnerabilities. If a vulnerability is found, the pipeline will log the error without stopping the build (that's what the `allow_failure: true` property does).\n\n#### Build\n\nThe build stage turns our SAM template into CloudFormation and turns our Python code into a valid AWS Lambda deployment package. 
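Under the hood, that packaging step is essentially one CLI call (a sketch only: the template filename and the bucket placeholder are stand-ins for the values configured in the pipeline's variables):\n\n```\n# Upload local code artifacts to S3 and emit a deployable template whose\n# code references point at the uploaded objects.\naws cloudformation package --template-file template.yml --s3-bucket \u003Cyour artifact bucket> --output-template-file deployment.yml\n```\n\n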
For example, here's the `build:dev` job:\n\n```yaml\nbuild:dev:\n stage: build-dev\n \u003C\u003C: *build_script\n variables:\n   \u003C\u003C: *dev_variables\n artifacts:\n   paths:\n     - deployment.yml\n   expire_in: 1 week\n only:\n   - branches\n except:\n   - master\n```\n\nWhat's going on here? Note first the combination of `only` and `except` properties to ensure that our development builds happen only on pushes to branches that aren't `master`. We're referring to `dev_variables`, the set of development-specific variables defined at the top of `.gitlab-ci.yml`. And we're running a script, pointed to by `build_script`, which packages our SAM template and code for deployment using the `aws cloudformation package` CLI call.\n\nThe artifact `deployment.yml` is the CloudFormation template output by our package command. It has all the implicit SAM magic expanded into CloudFormation resources. By managing it as an artifact, we can pass it along to further steps in the build pipeline, even though it isn't committed to our repository.\n\n#### Deploy\n\nOur deployments use AWS CloudFormation to deploy the packaged application in a target AWS environment.\n\nIn development and staging environments, we use the `aws cloudformation deploy` command to create a change set and immediately execute it. In production, we put a manual \"wait\" in the pipeline at this point so you have the opportunity to review the change set before moving on to the \"Execute\" step, which actually calls `aws cloudformation execute-change-set` to update the underlying stack.\n\nOur deployment jobs use a helper script, committed to the top level of the example repository, called `cfn-wait.sh`. This script is needed because the `aws cloudformation` commands don't wait for results; they report success as soon as the stack operation starts. To properly record the deployment results in our job, we need a script that polls the CloudFormation service and throws an error if the deployment or update fails.\n\n## Dynamic feature branch deployments and Review Apps\n\n![Dynamic feature branch deployments and Review Apps](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/dynamic-feature-branch-deployments.png){: .shadow.medium.center}\n\nWhen a non-master branch is pushed to GitLab, our pipeline runs tests, builds the [updated source code](/solutions/source-code-management/), and deploys and/or updates the changed CloudFormation resources in the development AWS account. When the branch is merged into master, or if someone clicks the \"Stop\" button next to the branch's environment in GitLab CI, the CloudFormation stack will be torn down automatically.\n\nIt is perfectly possible, and indeed desirable, to have multiple development feature branches simultaneously deployed as live environments for more efficient parallel feature development and QA. The serverless model makes this a cost-effective strategy for collaborating in the cloud.\n\nIf we are dynamically deploying our application on every branch push, we might like to view it as part of our interaction with the GitLab console (such as during a code review). GitLab supports this with a nifty feature called [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/). 
Review Apps allow you to specify an \"environment\" as part of a deployment job, as seen in our `deploy:dev` job below:\n\n```yaml\ndeploy:dev:\n \u003C\u003C: *deploy_script\n stage: deploy-dev\n dependencies:\n   - build:dev\n variables:\n   \u003C\u003C: *dev_variables\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   url: https://${CI_COMMIT_REF_NAME}.${DEV_HOSTED_ZONE_NAME}/services\n   on_stop: stop:dev\n only:\n   - branches\n except:\n   - master\n```\n\nThe link specified in the `url` field of the `environment` property will be accessible in the `Environments` section of GitLab CI/CD or on any merge request of the associated branch. (In the case of the sample SAM application provided with our example, since we don't have a front end to view, the link just takes you to a GET request for the `/services` API endpoint and should display some raw JSON in your browser.)\n\n![Link to live environment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/link-live-environment.png){: .shadow.medium.center}\n\nThe `on_stop` property specifies what happens when you \"shut down\" the environment in GitLab CI. This can be done manually or by deleting the associated branch. In the case above, we have the stop behavior for dev environments linked to a separate job called `stop:dev`:\n\n```yaml\nstop:dev:\n stage: deploy-dev\n variables:\n   GIT_STRATEGY: none\n   \u003C\u003C: *dev_variables\n \u003C\u003C: *shutdown_script\n when: manual\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   action: stop\n only:\n   - branches\n except:\n   - master\n```\n\nThis job launches the `shutdown_script` script, which cleans up the SAM deployment by deleting its CloudFormation stack.\n\nFor safety's sake, there is no automated teardown of staging or production environments.\n\n## Production releases\n\n![Production releases](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/production-releases.png){: .shadow.medium.center}\n\nWhen a change is merged into the master branch, the code is built, tested (including dependency scans) and deployed to the staging environment. This is a separate, stable environment that developers, QA, and others can use to verify changes before attempting to deploy in production.\n\n![Staging environment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/staging-environment.png){: .shadow.medium.center}\n\nAfter deploying code to the staging environment, the pipeline will create a change set for the production stack, and then pause for a manual intervention. A human user must click a button in the GitLab CI/CD \"Environments\" view to execute the final change set.\n\n## Now what?\n\nStep back and take a deep breath – that was a lot of information! Let's not lose sight of what we've done here: we've defined a secure, multi-account AWS deployment pipeline in our GitLab repo, integrated tests, builds and deployments, and successfully rolled a SAM-defined serverless app to the cloud. Not bad for a few lines of config!\n\nThe next step is to try this on your own. If you'd like to start with our sample \"AWS News\" application, you can simply run `sam init --location git+https://gitlab.com/gitlab-examples/aws-sam` to download the project onto your local machine. The AWS News app contains a stripped-down, single-account version of the `.gitlab-ci.yml` file discussed in this post, so you can try out deployments with minimal setup.\n\n## Further reading\n\nWe have barely scratched the surface of GitLab CI/CD and AWS SAM in this post. 
Here is some interesting reading if you would like to take your work to the next level:\n\n### SAM\n\n- [Implementing safe AWS Lambda deployments with AWS SAM and CodeDeploy](https://aws.amazon.com/blogs/compute/implementing-safe-aws-lambda-deployments-with-aws-codedeploy/)\n- [Running and debugging serverless applications locally using the AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-test-and-debug.html)\n\n### GitLab CI\n\n- [Setting up a GitLab Runner on EC2](https://hackernoon.com/configuring-gitlab-ci-on-aws-ec2-using-docker-7c359d513a46)\n- [Scheduled pipelines](https://docs.gitlab.com/ee/ci/pipelines/schedules.html)\n- [ChatOps](https://docs.gitlab.com/ee/ci/chatops/)\n\nPlease [let me know](https://twitter.com/forrestbrazeal) if you have further questions!\n\n### About the guest author\n\nForrest Brazeal is an [AWS Serverless Hero](https://aws.amazon.com/developer/community/heroes/forrest-brazeal/). He currently works as a senior cloud architect at [Trek10](https://trek10.com), an AWS Advanced Consulting Partner. You can [read more about Trek10's GitLab journey here](/customers/trek10/).\n",[110,2932,232,703,1286,4440],{"slug":6611,"featured":6,"template":678},"multi-account-aws-sam-deployments-with-gitlab-ci","content:en-us:blog:multi-account-aws-sam-deployments-with-gitlab-ci.yml","Multi Account Aws Sam Deployments With Gitlab Ci","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci.yml","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"_path":6617,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6618,"content":6624,"config":6630,"_id":6632,"_type":16,"title":6633,"_source":17,"_file":6634,"_stem":6635,"_extension":20},"/en-us/blog/pre-commit-post-deploy-is-dead",{"title":6619,"description":6620,"ogTitle":6619,"ogDescription":6620,"noIndex":6,"ogImage":6621,"ogUrl":6622,"ogSiteName":692,"ogType":693,"canonicalUrls":6622,"schema":6623},"Pre-commit and post-deploy code reviews are dead","In a world with Git, pre-commit and post-deploy code reviews are relics that can be eliminated from your workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678861/Blog/Hero%20Images/pre-commit.jpg","https://about.gitlab.com/blog/pre-commit-post-deploy-is-dead","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pre-commit and post-deploy code reviews are dead\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-01-31\",\n      }",{"title":6619,"description":6620,"authors":6625,"heroImage":6621,"date":6627,"body":6628,"category":14,"tags":6629},[6626],"Aricka Flowers","2019-01-31","\nPre-commit and post-deploy reviews have been the industry standard for ensuring that code is functioning as intended. But with Git around, are these methods still needed?\n\nLet’s take a step back and look at how they work.\n\n### Pre-commit reviews require that code is checked for bugs before it is committed\n\nOur CEO [Sid Sijbrandij](/company/team/#sytses) says pre-commit reviews make sense because new code is evaluated before it is introduced into the code base. But with distributed version control, he says, you can essentially [do the same thing on Git branches](https://docs.gitlab.com/ee/topics/gitlab_flow.html). 
Prior to Git, branches were too pricey to use regularly in [version control systems](/topics/version-control/) like Subversion.\n\n### Post-deploy reviews periodically check for areas of improvement in the code base\n\nPost-deploy reviews are typically done on a periodic basis as a way to check certain areas of the code base and decide if improvements can be made. This method doesn’t make sense, according to Sid, because \"The code has already proven itself in production ... so you’re reluctant to make changes to it.\" Additionally, the idea of occasionally reviewing your code base is not really needed:\n\n\"If there's technical debt in there, at least it's not affecting other code,\" Sid explains. \"There's a certain interest you pay on technical debt, and it has to do with how much it spreads the technical debt to your code base. Code that is not doing much, meaning it's being executed but it's not changing much, well at least it's not influencing other code. You're always going to have tech debt, and you're always going to have a limited time during which you can review and fix things. Focus on the code that's active, that's probably the best place to focus.\"\n\n### Git branches are more efficient\n\nUsing Git branches to ensure that code is safe to introduce into the code base improves efficiencies when compared to pre-commit and post-deploy reviews, says Sid, who finds the former to be hard to track.\n\n\"Pre-commit code reviews were a bit awkward because you didn't have a good way to refer to it. It was in the tool, but you didn't have a SHA or definite way to refer to that version. And it was hard to know what CI it ran against because there wasn't a SHA. So by doing it post-commit, you have it in versions and it's much easier to see what you referred to. But with code review after deploy, the mindset was, 'If it works, you move on.'\n\n> \"If you change it, there's extra risk; if you don't change it, it's extra tech debt – and you always have to choose between the two.\"\n\n\"You're not going to be as vigilant to technical debt building up and it's harder to request that someone change something that’s working. If you change it, there's extra risk; if you don't change it, it's extra tech debt – and you always have to choose between the two. With pre-deploy code reviews, you don't have to make that choice …  [With what we have now], I think pre-commit and post-deploy code reviews are dead, and code should be reviewed on a branch before it's deployed to production.\"\n\nWhat do you think: Are pre-commit and post-deploy reviews a thing of the past? 
Tweet us @GitLab!\n{: .alert .alert-gitlab-purple.text-center}\n\nPhoto by [Caspar Camille Rubin](https://unsplash.com/photos/fPkvU7RDmCo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash\n{: .note}\n",[1084,702,727],{"slug":6631,"featured":6,"template":678},"pre-commit-post-deploy-is-dead","content:en-us:blog:pre-commit-post-deploy-is-dead.yml","Pre Commit Post Deploy Is Dead","en-us/blog/pre-commit-post-deploy-is-dead.yml","en-us/blog/pre-commit-post-deploy-is-dead",{"_path":6637,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6638,"content":6645,"config":6651,"_id":6653,"_type":16,"title":6654,"_source":17,"_file":6655,"_stem":6656,"_extension":20},"/en-us/blog/android-publishing-with-gitlab-and-fastlane",{"title":6639,"description":6640,"ogTitle":6641,"ogDescription":6640,"noIndex":6,"ogImage":6642,"ogUrl":6643,"ogSiteName":692,"ogType":693,"canonicalUrls":6643,"schema":6644},"Publishing Android apps to Play Store with GitLab & fastlane","See how GitLab, together with fastlane, can build, sign, and publish apps for Android to the Google Play Store.","Publishing Android apps to Play Store with GitLab & fastlane","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679918/Blog/Hero%20Images/android-fastlane-pipeline.png","https://about.gitlab.com/blog/android-publishing-with-gitlab-and-fastlane","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to publish Android apps to the Google Play Store with GitLab and fastlane\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-01-28\",\n      }",{"title":6646,"description":6640,"authors":6647,"heroImage":6642,"date":6648,"body":6649,"category":14,"tags":6650},"How to publish Android apps to the Google Play Store with GitLab and fastlane",[4945],"2019-01-28","\n\nWhen we heard about [_fastlane_](https://fastlane.tools), an app automation tool for delivering iOS and Android builds, we wanted to give it a spin to see if a combination of GitLab and _fastlane_ could help us bring our mobile build and deployment automation to the next level and make mobile development a bit easier. You can see an [actual production deployment](https://gitlab.com/gitlab-org/gitter/gitter-android-app/pipelines/40768761) of the [Gitter Android app](https://gitlab.com/gitlab-org/gitter/gitter-android-app) that uses what we'll be implementing in this blog post; suffice it to say, the results were fantastic and we've become big believers that the combination of GitLab and _fastlane_ is a truly game-changing way for developers to [enable CI/CD](/topics/ci-cd/) (continuous integration and continuous delivery) for their mobile applications. 
With GitLab and _fastlane_ we're getting, with minimal effort:\n\n- Source control, project home, issue tracking, and everything else that comes with GitLab.\n- Content and images (metadata) for Google Play Store listing managed in source control.\n- Automatic signing, version numbers, and changelog.\n- Automatic publishing to `internal` distribution channel in Google Play Store.\n- Manual promotion through `alpha`, `beta`, and `production` channels.\n- Containerized build environment, available in GitLab's container registry.\n\nIf you'd like to jump ahead and see the finished product, you can take a look at the already-completed Gitter for Android [.gitlab-ci.yml](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/.gitlab-ci.yml), [build.gradle](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/app/build.gradle), [Dockerfile](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/Dockerfile), and [_fastlane_ configuration](https://gitlab.com/gitlab-org/gitter/gitter-android-app/tree/master/fastlane).\n\n## Configuring _fastlane_\n\nWe'll begin by setting up _fastlane_ in our project, make a couple of key changes to our Gradle configuration, and then wrap everything up in a GitLab pipeline.\n\n_fastlane_ has pretty good [documentation](https://docs.fastlane.tools/getting-started/android/setup/) to get you started, and if you run into platform-specific trouble it's the first place to check, but to get under way you really just need to complete a few straightforward steps.\n\n### Initializing your project\n\nFirst up, you need to get _fastlane_ installed locally and initialize your project. We're using the Ruby `fastlane` gem so you'll need Ruby on your system for this to work. You can read about [other install options in the _fastlane_ documentation](https://docs.fastlane.tools/getting-started/android/setup/). To get started, add the gem to your project's `Gemfile`:\n\n``` ruby\nsource \"https://rubygems.org\"\n\ngem \"fastlane\"\n```\n\nOnce your Gemfile is updated, you can run `bundle update` to update/generate your `Gemfile.lock`. From this point you can run _fastlane_ by typing `bundle exec fastlane`. Later, you'll see that in CI/CD we use `bundle exec fastlane ...` to ensure the command runs within the context of our project environment.\n\nNow that we have _fastlane_ ready to run, we just need to initialize our repo with our configuration. Run `bundle exec fastlane init` from within your project directory, answer a few questions, and _fastlane_ will create a new `./fastlane` directory containing its configuration.\n\n### Setting up _supply_\n\n_supply_ is a feature built into _fastlane_ which will help you manage screenshots, descriptions, and other localized metadata/assets for publishing to the Google Play Store.\n\nPlease refer to these [detailed instructions for collecting the credentials necessary to run _supply_](https://docs.fastlane.tools/getting-started/android/setup/#setting-up-supply).\n\nOnce you've set this up, simply run `bundle exec fastlane supply init` and all your current metadata will be downloaded from your store listing and saved in `fastlane/metadata/android`. From this point you're able to manage all of your store content as-code; when we publish a new version to the store later, the versions of content checked into your source repo will be used to populate the entry.\n\n### Appfile\n\nThe `./fastlane/Appfile` is pretty straightforward, and contains basic configuration you chose when you initialized your project. 
Later we'll see how to inject the `json_key_file` in your CI/CD pipeline at runtime.\n\n`./fastlane/Appfile`\n``` ruby\njson_key_file(\"~/google_play_api_key.json\") # Path to the json secret file - Follow https://docs.fastlane.tools/actions/supply/#setup to get one\npackage_name(\"im.gitter.gitter\") # e.g. com.krausefx.app\n```\n\n### Fastfile\n\nThe `./fastlane/Fastfile` is more interesting, and contains the first changes we made for Gitter vs. the default one created when you run `bundle exec fastlane init`.\n\nThe first section contains our definitions for how we want to run builds and tests. As you can see, this is pretty straightforward and builds right on top of your existing Gradle tasks.\n\n`./fastlane/Fastfile`\n``` ruby\ndefault_platform(:android)\n\nplatform :android do\n\n  desc \"Builds the debug code\"\n  lane :buildDebug do\n    gradle(task: \"assembleDebug\")\n  end\n\n  desc \"Builds the release code\"\n  lane :buildRelease do\n    gradle(task: \"assembleRelease\")\n  end\n\n  desc \"Runs all the tests\"\n  lane :test do\n    gradle(task: \"test\")\n  end\n\n...\n```\n\nCreating Gradle tasks that publish/promote builds can be complicated and error-prone, but _fastlane_ makes this much easier by giving you pre-built commands (called _fastlane_ actions) that let you perform complex tasks with just a few lines.\n\nIn our example, we've set up a workflow where a new build can be published to the internal track and then optionally promoted through alpha, beta, and ultimately production. We initially had a new build for each track but it's safer to have the same/known build go through the whole process.\n\n``` ruby\n...\n\n  desc \"Submit a new Internal Build to Play Store\"\n  lane :internal do\n    upload_to_play_store(track: 'internal', apk: 'app/build/outputs/apk/release/app-release.apk')\n  end\n\n  desc \"Promote Internal to Alpha\"\n  lane :promote_internal_to_alpha do\n    upload_to_play_store(track: 'internal', track_promote_to: 'alpha')\n  end\n\n  desc \"Promote Alpha to Beta\"\n  lane :promote_alpha_to_beta do\n    upload_to_play_store(track: 'alpha', track_promote_to: 'beta')\n  end\n\n  desc \"Promote Beta to Production\"\n  lane :promote_beta_to_production do\n    upload_to_play_store(track: 'beta', track_promote_to: 'production')\n  end\nend\n```\n\nAn important note is that we've only scratched the surface of the kinds of actions that _fastlane_ can automate. You can [read more about available actions here](https://docs.fastlane.tools/actions/), and it's even possible to create your own.\n\n## Gradle configuration\n\nWe also made a couple of key changes to our basic Gradle configuration to make publishing easier. Nothing major here, but it does help us make things run a little more smoothly.\n\n### Secret properties\n\nThe first changed section gathers the secret variables to be used for signing. 
These are either loaded via configuration file, or gathered from environment variables in the case of CI.\n\n`app/build.gradle`\n``` groovy\n// Try reading secrets from file\ndef secretsPropertiesFile = rootProject.file(\"secrets.properties\")\ndef secretProperties = new Properties()\n\nif (secretsPropertiesFile.exists()) {\n    secretProperties.load(new FileInputStream(secretsPropertiesFile))\n}\n// Otherwise read from environment variables, this happens in CI\nelse {\n    secretProperties.setProperty(\"oauth_client_id\", \"\\\"${System.getenv('oauth_client_id')}\\\"\")\n    secretProperties.setProperty(\"oauth_client_secret\", \"\\\"${System.getenv('oauth_client_secret')}\\\"\")\n    secretProperties.setProperty(\"oauth_redirect_uri\", \"\\\"${System.getenv('oauth_redirect_uri')}\\\"\")\n    secretProperties.setProperty(\"google_project_id\", \"\\\"${System.getenv('google_project_id') ?: \"null\"}\\\"\")\n    secretProperties.setProperty(\"signing_keystore_password\", \"${System.getenv('signing_keystore_password')}\")\n    secretProperties.setProperty(\"signing_key_password\", \"${System.getenv('signing_key_password')}\")\n    secretProperties.setProperty(\"signing_key_alias\", \"${System.getenv('signing_key_alias')}\")\n}\n```\n\n### Automatic versioning\n\nWe also set up automatic versioning using environment variables `VERSION_CODE`, `VERSION_SHA`, which we will set up later in CI/CD (locally they will just be `null` which is fine). Because each build's `versionCode` that you submit to the Google Play Store needs to be higher than the last, this makes it simple to deal with.\n\n`app/build.gradle`\n``` groovy\nandroid {\n    defaultConfig {\n        applicationId \"im.gitter.gitter\"\n        minSdkVersion 19\n        targetSdkVersion 26\n        versionCode Integer.valueOf(System.env.VERSION_CODE ?: 0)\n        // Manually bump the semver version part of the string as necessary\n        versionName \"3.2.0-${System.env.VERSION_SHA}\"\n```\n\n### Signing configuration\n\nFinally, we inject the signing configuration which will automatically be used by Gradle to sign the release build. Depending on your configuration, you may already be doing this. We only worry about signing in the release build that would potentially be published to the Google Play Store.\n\n> When using App Signing by Google Play, you will use two keys: the app signing key and the upload key. You keep the upload key and use it to sign your app for upload to the Google Play Store.\n>\n> [*https://developer.android.com/studio/publish/app-signing#google-play-app-signing*](https://developer.android.com/studio/publish/app-signing#google-play-app-signing)\n\n> IMPORTANT: Google will not re-sign any of your existing or new APKs that are signed with the app signing key. 
This enables you to start testing your app bundle in the internal test, alpha, or beta tracks while you continue to release your existing APK in production without Google Play changing it.\n>\n> *`https://play.google.com/apps/publish/?account=xxx#KeyManagementPlace:p=im.gitter.gitter&appid=xxx`*\n\n`app/build.gradle`\n``` groovy\n    signingConfigs {\n        release {\n            // You need to specify either an absolute path or include the\n            // keystore file in the same directory as the build.gradle file.\n            storeFile file(\"../android-signing-keystore.jks\")\n            storePassword \"${secretProperties['signing_keystore_password']}\"\n            keyAlias \"${secretProperties['signing_key_alias']}\"\n            keyPassword \"${secretProperties['signing_key_password']}\"\n        }\n    }\n    buildTypes {\n        release {\n            minifyEnabled false\n            testCoverageEnabled false\n            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'\n            signingConfig signingConfigs.release\n        }\n    }\n}\n```\n\n## Setting up the Docker build environment\n\nWe are building a Docker image to be used as a repeatable, consistent build environment which will speed things up because it will already have the dependencies downloaded and installed. We're just fetching a few prerequisites, installing the Android SDK, and then grabbing _fastlane_.\n\n`Dockerfile`\n```dockerfile\nFROM openjdk:8-jdk\n\n# Just matched `app/build.gradle`\nENV ANDROID_COMPILE_SDK \"26\"\n# Just matched `app/build.gradle`\nENV ANDROID_BUILD_TOOLS \"28.0.3\"\n# Version from https://developer.android.com/studio/releases/sdk-tools\nENV ANDROID_SDK_TOOLS \"24.4.1\"\n\nENV ANDROID_HOME /android-sdk-linux\nENV PATH=\"${PATH}:/android-sdk-linux/platform-tools/\"\n\n# install OS packages\nRUN apt-get --quiet update --yes\nRUN apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1 build-essential ruby ruby-dev\n# We use this for xxd hex->binary\nRUN apt-get --quiet install --yes vim-common\n# install Android SDK\nRUN wget --quiet --output-document=android-sdk.tgz https://dl.google.com/android/android-sdk_r${ANDROID_SDK_TOOLS}-linux.tgz\nRUN tar --extract --gzip --file=android-sdk.tgz\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui --all --filter android-${ANDROID_COMPILE_SDK}\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui --all --filter platform-tools\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui --all --filter build-tools-${ANDROID_BUILD_TOOLS}\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui --all --filter extra-android-m2repository\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui --all --filter extra-google-google_play_services\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui --all --filter extra-google-m2repository\n# install Fastlane\nCOPY Gemfile.lock .\nCOPY Gemfile .\nRUN gem install bundle\nRUN bundle install\n```\n\n## Setting up GitLab\n\nWith our build environment ready, let's set up our `.gitlab-ci.yml` to tie it all together in a CI/CD pipeline.\n\n### Stages\n\nThe first thing we do is define the stages that we're going to use. We'll set up our build environment, do our debug and release builds, run our tests, deploy to internal, and then promote through alpha, beta, and production. 
You can see that, apart from `environment`, these map to the lanes we set up in our `Fastfile`.\n\n``` yaml\nstages:\n  - environment\n  - build\n  - test\n  - internal\n  - alpha\n  - beta\n  - production\n```\n\n### Build environment update\n\nNext up we're going to update our build environment, if needed. If you're not familiar with `.gitlab-ci.yml` it may look like there's a lot going on here, but we'll take it one step at a time. The very first thing we do is set up an `.updateContainerJob` yaml template which can be used to capture shared configuration for other steps that want to use it. In this case, it will be used by the subsequent `updateContainer` and `ensureContainer` jobs.\n\n#### `.updateContainerJob` template\n\nIn this case, since we're dealing with Docker in Docker (`dind`), we are running some scripts which log into the local [GitLab container registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html), fetch the latest image to be used as a layer cache reference, build a new image, and finally push the new version to the registry.\n\n``` yaml\n.updateContainerJob:\n  image: docker:stable\n  stage: environment\n  services:\n    - docker:dind\n  script:\n    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY\n    - docker pull $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG || true\n    - docker build --cache-from $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG -t $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG .\n    - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n```\n\n#### `updateContainer` job\n\nThe first job that inherits `.updateContainerJob`, `updateContainer`, only runs if the `Dockerfile` was updated and will run through the template steps described above.\n\n``` yaml\nupdateContainer:\n  extends: .updateContainerJob\n  only:\n    changes:\n      - Dockerfile\n```\n\n#### `ensureContainer` job\n\nBecause the first pipeline on a branch can fail, the `only: changes: Dockerfile` syntax won't trigger for a subsequent pipeline after you fix things. This can leave your branch without a Docker image to use. So the `ensureContainer` job will look for an existing image and only build one if it doesn't exist. The one downside to this is that both of these jobs will run at the same time if it is a new branch.\n\nIdeally, we could just use `$CI_REGISTRY_IMAGE:master` as a fallback when `$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG` isn't found but there isn't any syntax for this.\n\n``` yaml\nensureContainer:\n  extends: .updateContainerJob\n  allow_failure: true\n  before_script:\n    - \"mkdir -p ~/.docker && echo '{\\\"experimental\\\": \\\"enabled\\\"}' > ~/.docker/config.json\"\n    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY\n    # Skip update container `script` if the container already exists\n    # via https://gitlab.com/gitlab-org/gitlab-ce/issues/26866#note_97609397 -> https://stackoverflow.com/a/52077071/796832\n    - docker manifest inspect $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG > /dev/null && exit || true\n```\n\n### Build and test\n\nWith our build environment ready, we're ready to build our `debug` and `release` targets. Similar to above, we use a template to set up repeated steps within our build jobs, avoiding duplication. 
Within this section, the first thing we do is set the image to the build environment container image we built in the previous step.\n\n#### `.build_job` template\n\n``` yaml\n.build_job:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  stage: build\n\n...\n```\n\nNext up is a step that's specific to Gitter, but if you use shared assets between an iOS and Android build you might consider doing something similar. What we're doing here is grabbing the latest mobile artifacts built by the web application pipeline and placing them in the appropriate location.\n\n``` yaml\n  before_script:\n    - wget --output-document=artifacts.zip --quiet \"https://gitlab.com/gitlab-org/gitter/webapp/-/jobs/artifacts/master/download?job=mobile-asset-build\"\n    - unzip artifacts.zip\n    - mkdir -p app/src/main/assets/www\n    - mv output/android/www/* app/src/main/assets/www/\n```\n\nNext, we use [project-level variables](https://docs.gitlab.com/ee/ci/variables/) containing a binary (hex) dump of our signing keystore file and convert it back to a binary file. This allows us to inject the file into the build at runtime instead of checking it into source control, a potential security concern. To get the `signing_jks_file_hex` variable hex value, we use this binary-to-hex command: `xxd -p gitter-android-app.jks`.\n\n``` yaml\n    # We store this binary file in a variable as hex with this command, `xxd -p gitter-android-app.jks`\n    # Then we convert the hex back to a binary file\n    - echo \"$signing_jks_file_hex\" | xxd -r -p - > android-signing-keystore.jks\n```\n\nHere we're setting the version at runtime – these environment variables will be used by the Gradle build as implemented above. Because `$CI_PIPELINE_IID` increments on each pipeline, we can guarantee our `versionCode` is always higher than the last and be able to publish to the Google Play Store.\n\n``` yaml\n    # We add 100 to get this high enough above current versionCodes that are published\n    - \"export VERSION_CODE=$((100 + $CI_PIPELINE_IID)) && echo $VERSION_CODE\"\n    - \"export VERSION_SHA=`echo ${CI_COMMIT_SHORT_SHA}` && echo $VERSION_SHA\"\n```\n\nNext, we automatically generate a changelog to include in the release by copying whatever you have in `CURRENT_VERSION.txt` to the current `\u003CversionCode>.txt`. You can update `CURRENT_VERSION.txt` as necessary. 
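\n\nAs a quick aside on the keystore trick above: before pasting the hex dump into a CI/CD variable, it's worth confirming locally that the round-trip reproduces your keystore byte-for-byte. A minimal sketch, using the same `xxd` flags as the job (`keystore.hex` and `roundtrip.jks` are just throwaway names for the check):\n\n``` sh\n# dump the keystore to plain hex – this is the value stored in the CI/CD variable\nxxd -p gitter-android-app.jks > keystore.hex\n# convert the hex back into a binary file, the same way the before_script does\nxxd -r -p keystore.hex roundtrip.jks\n# compare byte-for-byte; cmp prints nothing and exits 0 when the files match\ncmp gitter-android-app.jks roundtrip.jks && echo \"round-trip OK\"\n```\n\n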
I won't dive into the details of the merge request (MR) creation script here since it's somewhat specific to Gitter, but if you're interested in how something like this might work check out the [`create-changlog-mr.sh` script](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/ci-scripts/create-changlog-mr.sh).\n\n``` yaml\n    # Make the changelog\n    - cp ./fastlane/metadata/android/en-GB/changelogs/CURRENT_VERSION.txt \"./fastlane/metadata/android/en-GB/changelogs/$VERSION_CODE.txt\"\n    # We allow the remote push and MR creation to fail because the other job could create it\n    # and it's not strictly necessary (we just need the file locally for the CI/CD build)\n    - ./ci-scripts/create-changlog-mr.sh || true\n    # Because we allow the MR creation to fail, just make sure we are back in the right repo state\n    - git checkout \"$CI_COMMIT_SHA\"\n```\n\nJust a couple of final items: First, whenever a build job is done, we remove the jks file just to be sure it doesn't get saved to artifacts, and second we set up the artifact directory from where the output of the build (`.apk`) will be saved.\n\n``` yaml\n  after_script:\n    - rm android-signing-keystore.jks || true\n  artifacts:\n    paths:\n    - app/build/outputs\n```\n\n#### `buildDebug` and `buildRelease` jobs\n\nMost of the complexity here was set up in the template, so as you can see our `buildDebug` and `buildRelease` job definitions are very clear. Both just call the appropriate _fastlane_ lane (which, if you remember, then calls the appropriate Gradle task). The `buildRelease` output is associated with the `production` environment so we can define an extra production-scoped set of [project-level variables](https://docs.gitlab.com/ee/ci/variables/) which are different from our testing variables.\n\nSince we set up code signing in the Gradle config (`build.gradle`) earlier, we can be confident here that our `release` builds are appropriately signed and ready for publishing.\n\n``` yaml\nbuildDebug:\n  extends: .build_job\n  script:\n    - bundle exec fastlane buildDebug\n\nbuildRelease:\n  extends: .build_job\n  script:\n    - bundle exec fastlane buildRelease\n  environment:\n    name: production\n```\n\nTesting is really just another instance of the same thing, but instead of calling one of the build lanes we call the test lane. Note that we are using a `dependency` on the `buildDebug` job to ensure we don't need to rebuild anything.\n\n``` yaml\ntestDebug:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  stage: test\n  dependencies:\n    - buildDebug\n  script:\n    - bundle exec fastlane test\n```\n\n### Publish\n\nNow that our code is being built, we're ready to publish to the Google Play Store. We only *publish* to the `internal` testing track and *promote* this same build to the rest of the tracks.\n\nThis is achieved through the _fastlane_ integration, using a pre-built action to handle the job. In this case we are using a `dependency` on the `buildRelease` job, and creating a local copy of the Google API JSON keyfile (again stored in a [project-level variable](https://docs.gitlab.com/ee/ci/variables/) instead of checking it into source control). We have this job (and all subsequent jobs) set to run only on `manual` action so we have full human control/intervention from this point forward. If you prefer to continuously deliver to your `internal` track you'd simply need to remove the `when: manual` entry and you'd have achieved your goal.\n\nIf you're like me, this may seem too simple to actually work. 
With everything we've configured in GitLab and _fastlane_ to this point, it's really this simple!\n\n``` yaml\npublishInternal:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  stage: internal\n  dependencies:\n    - buildRelease\n  when: manual\n  before_script:\n    - echo $google_play_service_account_api_key_json > ~/google_play_api_key.json\n  after_script:\n    - rm ~/google_play_api_key.json\n  script:\n    - bundle exec fastlane internal\n```\n\n### Promote\n\nAs indicated earlier, promotion through alpha, beta, and production are all `manual` jobs. If internal testing is good, it can be promoted one step forward in sequence all the way through to production using these manual jobs.\n\nIf you're with me to this point, there's really nothing new here and this really highlights the power of GitLab with _fastlane_. We have a `.promote_job` template job which creates the local Google API JSON key file and the promote jobs themselves are basically identical.\n\n``` yaml\n.promote_job:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  when: manual\n  dependencies: []\n  only:\n    - master\n  before_script:\n    - echo $google_play_service_account_api_key_json > ~/google_play_api_key.json\n  after_script:\n    - rm ~/google_play_api_key.json\n\npromoteAlpha:\n  extends: .promote_job\n  stage: alpha\n  script:\n    - bundle exec fastlane promote_internal_to_alpha\n\npromoteBeta:\n  extends: .promote_job\n  stage: beta\n  script:\n    - bundle exec fastlane promote_alpha_to_beta\n\npromoteProduction:\n  extends: .promote_job\n  stage: production\n  script:\n    - bundle exec fastlane promote_beta_to_production\n```\n\nNote that we're `only` allowing production promotion from the `master` branch, instead of from any branch. This is to ensure that the production build uses the separate set of `production` environment variables which only happens for the `buildRelease` job. 
We also have these [variables set as protected](https://docs.gitlab.com/ee/ci/variables/#protected-variables) so we can enforce that they are only used on the `master` branch which is protected.\n\n### Variables\n\nThe last step is to make sure you set up the [project-level variables](https://docs.gitlab.com/ee/ci/variables/) we used throughout the configuration above:\n\n - `google_play_service_account_api_key_json`: see [https://docs.fastlane.tools/getting-started/android/setup/#collect-your-google-credentials](https://docs.fastlane.tools/getting-started/android/setup/#collect-your-google-credentials)\n - `oauth_client_id`\n - `oauth_client_id`, protected, `production` environment\n - `oauth_client_secret`\n - `oauth_client_secret`, protected, `production` environment\n - `oauth_redirect_uri`\n - `oauth_redirect_uri`, protected, `production` environment\n - `signing_jks_file_hex`: `xxd -p gitter-android-app.jks`\n - `signing_key_alias`\n - `signing_key_password`\n - `signing_keystore_password`\n\nIf you are using the same [`create-changlog-mr.sh` script](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/ci-scripts/create-changlog-mr.sh) as us,\n\n - `deploy_key_android_repo`: see [https://docs.gitlab.com/ee/user/project/deploy_tokens/](https://docs.gitlab.com/ee/user/project/deploy_tokens/)\n - `gitlab_api_access_token`: see [https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) (we use a bot user)\n\n![Project variables for Gitter for Android](https://about.gitlab.com/images/blogimages/android-fastlane-variables.png){: .shadow.medium.center}\n\n## What's next\n\nUsing this configuration we've got Gitter for Android building, signing, deploying to our internal track, and publishing to production as frequently as we like. 
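\n\nSince all of the pipeline's logic lives in _fastlane_ lanes, you can also exercise the whole flow from a workstation before wiring it into CI/CD. A minimal sketch, assuming you've exported the same signing variables the jobs use and saved the Google API key to the path named in the `Appfile`:\n\n``` sh\n# run the unit tests, exactly as the testDebug job does\nbundle exec fastlane test\n# build a signed release APK (build.gradle reads the signing_* variables)\nbundle exec fastlane buildRelease\n# upload that APK to the internal track (reads ~/google_play_api_key.json)\nbundle exec fastlane internal\n```\n\n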
Next up will be to do the same for iOS, so watch this space for our next post!\n\nPhoto by [Patrick Tomasso](https://unsplash.com/@impatrickt) on [Unsplash](https://unsplash.com/photos/KGcLJwIYiac)\n{: .note}\n",[110,232,728,749],{"slug":6652,"featured":6,"template":678},"android-publishing-with-gitlab-and-fastlane","content:en-us:blog:android-publishing-with-gitlab-and-fastlane.yml","Android Publishing With Gitlab And Fastlane","en-us/blog/android-publishing-with-gitlab-and-fastlane.yml","en-us/blog/android-publishing-with-gitlab-and-fastlane",{"_path":6658,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6659,"content":6665,"config":6671,"_id":6673,"_type":16,"title":6674,"_source":17,"_file":6675,"_stem":6676,"_extension":20},"/en-us/blog/support-microsoft-exchange-google-groups-incoming-email",{"title":6660,"description":6661,"ogTitle":6660,"ogDescription":6661,"noIndex":6,"ogImage":6662,"ogUrl":6663,"ogSiteName":692,"ogType":693,"canonicalUrls":6663,"schema":6664},"Microsoft Exchange & Google Groups now supported by GitLab","You now have even more choices when configuring an email server for your self-managed GitLab instance","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684071/Blog/Hero%20Images/mailboxes.jpg","https://about.gitlab.com/blog/support-microsoft-exchange-google-groups-incoming-email","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab now supports Microsoft Exchange and Google Groups for incoming email features like Service Desk\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"}],\n        \"datePublished\": \"2019-01-23\",\n      }",{"title":6666,"description":6661,"authors":6667,"heroImage":6662,"date":6669,"body":6670,"category":14},"GitLab now supports Microsoft Exchange and Google Groups for incoming email features like Service Desk",[6668],"Victor Wu","2019-01-23","\n\nAs of [GitLab 11.7](/releases/2019/01/22/gitlab-11-7-released/), you can now use either Microsoft Exchange or Google Groups as the email server for your self-managed GitLab instance, giving you even more options when setting up incoming email functionality. Users of GitLab.com are unaffected by this change, and already have access to features requiring incoming email, per their specific [subscription](/pricing/#gitlab-com).\n\nThere are several important features in GitLab that depend on GitLab being able to receive email from users:\n- [Comment on issues and merge requests by replying to notification emails](https://docs.gitlab.com/ee/administration/reply_by_email.html)\n- [Create a new issue via email with a user-specific email address](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#new-issue-via-email)\n- [Create a new merge request via email with a user-specific email address](https://docs.gitlab.com/ee/user/project/merge_requests/index.html#create-new-merge-requests-by-email)\n- [Service Desk: Have your customers email in feedback/support which are converted directly into GitLab issues](https://docs.gitlab.com/ee/user/project/service_desk.html)\n\nPreviously, these features were available to self-managed GitLab users only if their email server supported sub-addressing. Microsoft Exchange and Google Groups do not support sub-addressing. GitLab 11.7 now supports both sub-addressing and catch-all email mailboxes, enabling compatibility with these two popular options. 
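\n\nIf you run the Omnibus package, the incoming email settings live in `/etc/gitlab/gitlab.rb` and are applied with `sudo gitlab-ctl reconfigure`. As a rough, illustrative sketch only – the exact values depend on your mail server, and the documentation linked below is the authoritative reference – a catch-all-style configuration looks something like this, with GitLab filling in the `%{key}` placeholder at runtime:\n\n``` ruby\n# /etc/gitlab/gitlab.rb – illustrative values, not a copy-paste recipe\ngitlab_rails['incoming_email_enabled'] = true\n# With a catch-all mailbox, the %{key} placeholder sits in the local part of the address\ngitlab_rails['incoming_email_address'] = \"incoming-%{key}@example.com\"\n# Credentials and IMAP connection details for the mailbox GitLab polls\ngitlab_rails['incoming_email_email'] = \"incoming@example.com\"\ngitlab_rails['incoming_email_password'] = \"[REDACTED]\"\ngitlab_rails['incoming_email_host'] = \"mail.example.com\"\ngitlab_rails['incoming_email_port'] = 993\ngitlab_rails['incoming_email_ssl'] = true\ngitlab_rails['incoming_email_mailbox_name'] = \"inbox\"\n```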
\n\nRead more about [configuring incoming email in GitLab](https://docs.gitlab.com/ee/administration/incoming_email.html).\n\n[Cover image](https://unsplash.com/photos/fb7yNPbT0l8) by [mathyaskurmann](https://unsplash.com/@mathyaskurmann) on Unsplash\n{: .note}\n",{"slug":6672,"featured":6,"template":678},"support-microsoft-exchange-google-groups-incoming-email","content:en-us:blog:support-microsoft-exchange-google-groups-incoming-email.yml","Support Microsoft Exchange Google Groups Incoming Email","en-us/blog/support-microsoft-exchange-google-groups-incoming-email.yml","en-us/blog/support-microsoft-exchange-google-groups-incoming-email",{"_path":6678,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6679,"content":6685,"config":6691,"_id":6693,"_type":16,"title":6694,"_source":17,"_file":6695,"_stem":6696,"_extension":20},"/en-us/blog/inside-our-new-development-team-lead-persona",{"title":6680,"description":6681,"ogTitle":6680,"ogDescription":6681,"noIndex":6,"ogImage":6682,"ogUrl":6683,"ogSiteName":692,"ogType":693,"canonicalUrls":6683,"schema":6684},"What are the best and worst parts about being a development team lead?","Dev leads, we feel you. Here's a deep dive into our interviews with development team leads, and the new persona they informed.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668224/Blog/Hero%20Images/inside-our-new-development-team-lead-persona.jpg","https://about.gitlab.com/blog/inside-our-new-development-team-lead-persona","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What are the best and worst parts about being a development team lead?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Katherine Okpara\"}],\n        \"datePublished\": \"2019-01-18\",\n      }",{"title":6680,"description":6681,"authors":6686,"heroImage":6682,"date":6688,"body":6689,"category":14,"tags":6690},[6687],"Katherine Okpara","2019-01-18","\nWelcome back to our series on the [new GitLab personas](/handbook/product/personas/)! I recently [wrote about what we learned from product managers during interviews](/blog/inside-our-new-product-manager-persona/) for our [UX research project to develop personas](https://gitlab.com/gitlab-org/ux-research/issues/77) for all product areas. In this post, I'll share some of the insights from our efforts to better understand development team leads, and introduce the resulting persona, [Delaney](/handbook/product/personas/#delaney-development-team-lead).\n\n## The research\n\nHere are some of the findings from my [six interviews](https://gitlab.com/gitlab-org/ux-research/issues/95) conducted for the persona.\n\nDevelopment team leads are often responsible for meeting with product managers and stakeholders to discuss scheduled feature requests, convert concepts into practical solutions, ensure that capacity is properly estimated, and assign work to developers. They are also involved in other duties such as creating design and functional specifications, writing code, documenting and automating processes, and mentoring other developers.\n\n### So, what’s the hardest part about being a development team lead?\n\nDue to the nature of their work, the challenges development team leads face often cross into several domains.\n\n#### Vague requirements and poor communication\n\nIt can be difficult to know the status of certain requirements when other team members don't update the various tools that are being used. 
Important information can get lost along the way, which often leads to repetitive discussions or fixing incorrect work. Many of the people we spoke with are looking for ways to have this information readily accessible and consistently communicated throughout their teams.\n\n> \"Sometimes the back and forth can be annoying, when the requirements aren’t clear and I have to go back a step to understand what is going on or a component is not what I wanted. At a previous company, the back and forth was especially drawn out since the team did not work closely together. At [my current] company, this problem isn’t as severe since I work closely with the team and can quickly ask for clarification if I need to. Working more efficiently saves a lot of time.\"\n\n####  Difficulty making accurate estimations of timeline and capacity\n\nA team lead must have a good understanding of the skillsets available on their team and use this insight to balance business objectives. In order to get a better sense of the experience levels of different team members, they often hold one-on-one meetings or conduct reviews during and after a development cycle.\n\n> \" ... This goes back to the burndown chart – if it's being used correctly, it can help you see where you’ll end up. In order for that to happen, you need your estimations to be accurate. And in order for _that_ to happen you need to figure out the accuracy of the baseline and experience of the developer. For example, someone who is more junior has less of a reference point. I have to assign extra points to stories, if there are unknown variables.\"\n\n#### Delivering on time\n\nWhen demand surpasses current capacity, it can be stressful to resolve existing problems without creating new issues that result from hasty work. It can also be difficult to explain technical limitations to stakeholders who are not involved in the development process.\n\n> \"Someone might see a code review request but feel conflicted since they only have two days left to finish their own tasks. So sometimes testers and customers are waiting on these code reviews to move forward ... The biggest thing would be having all those tickets, all of those changes, closely correlated with the actual changes in Git. 'For this particular feature, here are all the changes in Git.' You don’t have to read the codebase or fire up the whole application. You have the information all in one place and don’t need to hunt down information.\"\n\n#### Changing mindsets in organizations to adopt faster, iterative approaches\n\nSome development teams are slowed down by inefficient toolchains or outdated workflows because their organizations are resistant to change and adopting new practices. Introducing new ideas and methodologies can be an especially complex process in organizations that create products for industries with more restrictions and regulations than others.\n\n> \"Most blockers that arise are put in their own way. I would prefer to iterate while they rather plan everything out for long periods of time. Their own processes get in their way because they don’t think they can move faster. Many of their processes are filled with errors and take days or weeks. They’ve always done things a certain way and are not really willing to make a change.\"\n\n### What motivates a development team lead?\n\nOne of the biggest goals for many development team leads is the drive to continually optimize processes and deliver value to the product. 
They must also build a level of communication that enables them to assign tasks to the appropriate people, explain why certain feature requests are or are not feasible, and continue to implement strategic solutions.\n\n### What’s the best part about being a development team lead?\n\nThe best part of being a Development Team Lead is problem solving on a variety of levels – from tools to methodologies to team relations and more. When teams are well supported by their leaders and organizations, they are better equipped to meet the expectations that will move both the product and business forward!\n\n## The persona\n\n[![Delaney, Development Team Lead persona](https://about.gitlab.com/images/blogimages/delaney-dev-team-lead-persona.png)](/handbook/product/personas/#delaney-development-team-lead)\n\n### Want to share your experiences of GitLab with me?\n\nJoin [GitLab First Look](/community/gitlab-first-look/) and help us build an even better picture of who GitLab’s users really are!\n\n[Photo](https://unsplash.com/photos/atSaEOeE8Nk) by [Steven Lelham](https://unsplash.com/@slelham) on Unsplash\n{: .note}\n",[1328,727,1144],{"slug":6692,"featured":6,"template":678},"inside-our-new-development-team-lead-persona","content:en-us:blog:inside-our-new-development-team-lead-persona.yml","Inside Our New Development Team Lead Persona","en-us/blog/inside-our-new-development-team-lead-persona.yml","en-us/blog/inside-our-new-development-team-lead-persona",{"_path":6698,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6699,"content":6705,"config":6711,"_id":6713,"_type":16,"title":6714,"_source":17,"_file":6715,"_stem":6716,"_extension":20},"/en-us/blog/marker-io-gitlab-integration",{"title":6700,"description":6701,"ogTitle":6700,"ogDescription":6701,"noIndex":6,"ogImage":6702,"ogUrl":6703,"ogSiteName":692,"ogType":693,"canonicalUrls":6703,"schema":6704},"How to radically simplify bug reporting in GitLab","Marie Hargitt from Marker.io shares how product teams can empower colleagues to report actionable issues in GitLab, without driving developers crazy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679899/Blog/Hero%20Images/gitlab-marker-io.png","https://about.gitlab.com/blog/marker-io-gitlab-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to radically simplify bug reporting in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marie Hargitt\"}],\n        \"datePublished\": \"2019-01-09\",\n      }",{"title":6700,"description":6701,"authors":6706,"heroImage":6702,"date":6708,"body":6709,"category":14,"tags":6710},[6707],"Marie Hargitt","2019-01-09","\n\nIf you’re like us, you’re constantly pushing out new features and improvements to your product, but with those updates and changes comes the inevitable risk of bugs. The best way to find and fix those bugs are your internal reporters and developers, but getting the whole team to report bugs into GitLab can be hard.\n\nWhether it’s your copywriters on the lookout for wonky content, your QA testers that find a broken form, designers that spot a font size five times too big, or your customer support team receiving word that a billing issue is blocking customers from paying – reporters can take forever to send actionable feedback to developers, who in turn don’t always get the information they need to smash those bugs.\n\n## What a bug-reporting workflow usually looks like ...\n\n### ... 
for reporters\n\nBecause reporters aren’t always super tech-savvy, it can be tricky for them to share reports that are helpful for your developers. The process is long, complicated, and tracking down the crucial technical information isn’t always easy.\n\nIn most teams, reporting bugs into GitLab looks like this:\n1. Find the bug.\n1. Open screenshot tool, capture bug.\n1. Open software to annotate screenshot, add comments.\n1. Open and log into GitLab.\n1. Select the correct project.\n1. Create new issue.\n1. Document the bug. (How exactly do I do this!?)\n1. Add technical information. (What is this even?)\n1. Attach screenshots.\n1. And then finally: submit report.\n\nThat’s a whopping 10 steps to report even the smallest bugs.\n\nAnd we didn’t even mention the super-fun scavenger hunt reporters have to go on to identify all of the environmental data developers need to even start thinking about fixing the bugs.\n\n### ... for developers\n\nDevelopers get feedback flying at them in all forms – emails, phone calls, sticky notes and screenshots.\n\nThey’re ready to gouge their eyes out because they can’t reproduce the reported bugs, because they’re not receiving actionable feedback from the get-go, and they don’t have time to investigate all the bug reports they receive.\n\n## So what can you do to make sure everyone can contribute?\n\n### Speed up workflow for reporters\n\nWe created Marker.io to speed up and simplify your team bug reporting. Now, those 10 steps are only three:\n\n1. Capture and annotate screenshot of bug.\n1. Send bug reports straight to your GitLab project.\n1. Keep hunting for more bugs!\n\nOne real-life example is an issue we ran into with our pricing page a while back. During our QA process, we noticed a weird bug: the price for our Team Plan was mysteriously missing. 
Instead of using the lengthy process mentioned earlier in this post, we used Marker.io to quickly send feedback to our dev team and get the bug fixed in no time.\n\nThis is what reporting the issue with Marker.io looked like:\n\n![Creating the bug report issue in GitLab](https://about.gitlab.com/images/blogimages/GitLab-creating-issue-Marker-io.gif){: .shadow.center.medium}\n\nNow, not only is the process much faster, but you never have to leave your website, there is nothing to configure, and all the technical data the developers need is automatically captured by Marker.io.\n\n### Create actionable reports for your developers\n\nOnce a visual feedback tool like Marker.io is introduced into the equation your developers can choose where they receive feedback, down to the specific bug-tracking GitLab project, and the important technical data they need is automatically grabbed and included in every bug report.\n\nThat means environment data, including:\n- Browser\n- Operating system (OS) and version\n- Screen size\n- Zoom level\n- Pixel ratio\n\nHere’s an example of what a Marker.io bug report looks like in GitLab:\n\n![The bug report issue inside GitLab](https://about.gitlab.com/images/blogimages/GitLab-issue-created-with-Marker-io.gif){: .shadow.center.medium}\n\nThis GitLab issue has all the information needed for your developers to act on it:\n\n- The issue is in the correct project.\n- Any pre-set epics, milestones or labels are included.\n- The issue is assigned to a team member.\n- The annotated screenshot is attached.\n- The expected and actual results are well documented.\n- The steps to reproduce are detailed.\n- The technical environment information is all there.\n- The issue has the URL where the screenshot was captured.\n- The issue has a due date.\n\nNo more wasted time following up with reporters to fill in the gaps. It’s all there, organized directly in your chosen GitLab project – complete with everything vital to fix your bugs.\n\nWant to try for yourself? Marker.io comes with a free 15-day trial. 
Give it a go ➡️ [Marker.io/gitlab](https://marker.io/gitlab?utm_source=gitlab&utm_medium=post&utm_campaign=gitlab_bug_reporting)\n\n### About the guest author\n\nMarie Hargitt is the Marketing Manager of [Marker.io](https://marker.io/gitlab), a powerful tool that makes bug reporting and visual feedback easy for the whole team.\n",[1347,232,727],{"slug":6712,"featured":6,"template":678},"marker-io-gitlab-integration","content:en-us:blog:marker-io-gitlab-integration.yml","Marker Io Gitlab Integration","en-us/blog/marker-io-gitlab-integration.yml","en-us/blog/marker-io-gitlab-integration",{"_path":6718,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6719,"content":6725,"config":6732,"_id":6734,"_type":16,"title":6735,"_source":17,"_file":6736,"_stem":6737,"_extension":20},"/en-us/blog/configure-post",{"title":6720,"description":6721,"ogTitle":6720,"ogDescription":6721,"noIndex":6,"ogImage":6722,"ogUrl":6723,"ogSiteName":692,"ogType":693,"canonicalUrls":6723,"schema":6724},"GitLab restructures to boost cross-functional collaboration","Implementing a new structure sounds like a big change, but our Configure group is here to give you the scoop.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678839/Blog/Hero%20Images/inside-look-at-new-cross-functional-teams-at-gitlab.jpg","https://about.gitlab.com/blog/configure-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We restructured to allow better cross-functional collaboration — here's how it's going.\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2018-12-13\",\n      }",{"title":6726,"description":6721,"authors":6727,"heroImage":6722,"date":6729,"body":6730,"category":14,"tags":6731},"We restructured to allow better cross-functional collaboration — here's how it's going.",[6728],"Emily von Hoffmann","2018-12-13","\nHello world, meet the GitLab Configure group! They’re the folks hard at work improving [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), the [Kubernetes integration](https://docs.gitlab.com/ee/user/project/clusters/), and all the related applications on GitLab. They, like the rest of the GitLab engineering function, recently changed how they work together when we split up into devops stage groups according to [product area](/handbook/product/categories/). Each group contains a product manager, UX designer, several engineers, and other contributors. They still belong to [teams](/company/team/structure/#team-and-team-members) with others usually in their same role, but they work together as a [group dedicated to a stage](/company/team/structure/#stage-groups) of the product lifecycle. \n\n![meet the configure group](https://about.gitlab.com/images/blogimages/configure-team.jpg){: .shadow.medium.center}\n\nSo far, Configure group members say this has helped them stay focused and connected. Staff UX Designer Taurie Davis explains that while she used to have to switch gears and spend time getting caught up on different product areas, she can now home in on finding solutions to familiar problems. Having a stable group of collaborators also promotes shared learning, because they’re working together on the same issues at the same time. Product Manager Daniel Gruesso also sees benefits in having a dedicated set of people for each product area; they enjoy more latitude and no longer face as much competition for getting their work prioritized. 
These are all benefits of [stable counterparts](/handbook/leadership/#stable-counterparts), or people in different functions, departments, or teams who routinely work together, easing communication to avoid conflict and the [downsides of a matrix organization](/handbook/leadership/#no-matrix-organization).\n\nSome of the challenges that drove this change have been echoed in our user research, with cross-group communication a common and recurring roadblock. In our [2018 Global Developer Report](/developer-survey/previous/2018/), a quarter of engineers indicated that they feel siloed, and lack visibility into what their colleagues in operations, product, and security are working on. \n\nThis was reinforced in recent interviews by our UX research team, where many [developers](https://drive.google.com/file/d/1EVrjVcgIBbuNf4Gwenajsiy6Wv9HsTJw/view) we spoke with said that they’re frustrated by changing requirements and scope creep, and pinpointed poor communication with and empathy for other teams as the cause. Their colleagues in [operations roles](https://drive.google.com/file/d/1A5mSNoPJydjcWKE4rdO2287sjnABxGDA/view) face a similar challenge in convincing others to invest cycles in proactive work that can save them time and stress in the future. Although implementing stable groups may seem like a big change, we’ve seen positive results and hope that sharing our experience may help others take the plunge. \n\nI recently caught up with a few members of the Configure group, read on for their perspectives on how it’s been going.\n\n### Can you each introduce yourself and explain your role?\n\n**Dylan:** Hi I'm [Dylan](/company/team/#DylanGriffith). I'm the [Backend Engineering Manager](https://handbook.gitlab.com/job-families/engineering/backend-engineer/#engineering-manager/) for the [Configure](/handbook/product/categories/) group.\n\n**Thong:** I’m new here! I’m [Thong](/company/team/#thongkuah) and I'm a senior [backend engineer](https://handbook.gitlab.com/job-families/engineering/backend-engineer/). \n\n**Mayra:** I'm [Mayra](/company/team/#may_cabrera) and I'm a [backend engineer](https://handbook.gitlab.com/job-families/engineering/backend-engineer/) for the Configure group.\n\n**Taurie:** I'm [Taurie](/company/team/#tauried), the UX designer for the Configure group. I work closely with product and engineering to help shape the overall user experience of our products.\n\n**Daniel:** I'm [Daniel](/company/team/#danielgruesso), the [Product Manager](https://handbook.gitlab.com/job-families/product/product-manager/) for the Configure group. In short I have to make sure that the features we ship are aligned to our vision; this involves interacting with everyone from customers to the CEO. Working together with engineering, UX, and leadership we make our vision a reality.\n\n### In general, how do you think it's going so far?\n\n**Dylan:** So far the stable devops stage group is working well. I believe that backend teams already were well focused on specific product areas, but I think the addition of focused UX and frontend engineers on our product area helps in a few ways. First, we know who to talk to about UX decisions, and the UX designer and frontend engineers have good context across the feature set and often have good insights based on this context. 
Backend engineers also get to collaborate and form better working relationships with UX and frontend, and as a consequence we communicate more effectively in general.\n\n### What are some of the big differences that arise from working with people in different roles, versus working more with people who share your background? \n\n**Thong:** Key for me is the different strengths and perspectives that we all bring into the group. I'm pleasantly surprised how well our strengths overlap and support the group. Because we come from different perspectives, I feel we can often challenge each other constructively and check that we are heading in the right direction with respect to achieving the best value for the product.\nThe challenges I have seen in the past would be establishing a common understanding of the group's goals, which sometimes might not be exactly aligned with each department's goals.\n\n**Mayra:** Thong’s answer is a really good one. I'm also quite impressed by how our strengths bolster the group's productivity.\n\n**Taurie:** We bring different perspectives to the table, which can only improve the product in the end. It also greatly improves communication and allows us to work together instead of in sequence. I, personally, have also learned a ton by working so closely with both product and engineering on a daily basis.\n\n**Daniel:** I greatly benefit from learning the technical aspects of our work; if I only interacted with other product folks I would surely not learn as much. This is by far the job where I've learned the most in the shortest amount of time. I love that.\n\n### How has the new structure impacted your day to day? \n\n**Mayra:** For me, it has had a positive impact because now I'm focused on developing particular features for certain areas of GitLab, like the Kubernetes integration and Auto DevOps.\n\n**Dylan:** We have deeper conversations with UX designers and frontend engineers as they understand our product set well. Ownership from the UX designer means that as an engineer I feel less stressed about resolving UX decisions or making issues for UX issues, as I can see that Taurie will often take responsibility for seeing this through.\n\n### How have you tried to bond as a new group? \n\n**Dylan:** We are bonding quite well. We started with daily standups and worked our way down to twice weekly. The daily standup really accelerated the bonding between group members and has resulted in fairly healthy collaboration and high levels of trust between group members. We've also done one retro as a full group, which I believe was a more comfortable and open environment as a consequence of us bonding for some time beforehand.\n\n**Mayra:** At the last GitLab [Summit](/events/gitlab-contribute/), we had our first on-site dinner; sadly, Thong was not able to join us, so we'll need to update this picture at the next summit!\n\n![team dinner](https://about.gitlab.com/images/blogimages/configure-team-dinner.png){: .shadow.medium.center}\n\n**Taurie:** We also have [coffee break calls](/culture/all-remote/#coffee-break-calls) regularly with different group members as a way to discuss things outside of work and continue to strengthen the connection between group members. Our monthly group retrospectives are a great way to discuss what is working well within our group, what has been on our minds, and what we can improve for greater collaboration and results.\n\n**Daniel:** I always try to start calls with a personal touch, no matter how small; I've found it sets people at ease. 
We plan synchronously and clear up any doubts on the work before starting. Once we're aligned, we mostly catch up asynchronously.\n\n### Are there any previous problems, delays, or frustrations that have been resolved or prevented in the new structure?\n\n**Dylan:** Difficult to say, but at a high level we had many discussions before forming this group along the lines of engineers waiting for issues to be labeled \"UX Ready\" before starting work on any feature. But now as a group we've come to realize that we're all involved from the beginning to the end, and engineers are responsible for ensuring the UX makes sense and the UX designer is also responsible for ensuring the final product makes sense. We also regularly have UI contributions from Taurie, which saves the round trip of commenting on the MR and waiting for the engineer to make the changes.\n\n**Taurie:** Shifting between multiple different product areas made it much more difficult to learn and keep up to date with the more technical areas of our product such as Kubernetes and Auto DevOps. Being integrated into a group that is constantly working on these features means I have more domain knowledge and can more confidently answer questions related to the user experience of our area.\n\n### What are you most excited to tackle together, and what can we look forward to seeing from the group?\n\n**Dylan:** \nWe're focusing on making Auto DevOps clearer to users; making more decisions based on research rather than relying on industry trends; building a large and engaged user base for Auto DevOps that helps guide us to make better decisions by collaborating on issues; and improving the code architecture so that frontend engineers are more empowered to build better UX without the need to involve backend engineers (e.g., more user experience handled in pure frontend JavaScript code).\n\n**Taurie:** I am excited to see the group continue to grow and tackle issues that improve the Auto DevOps experience so that it is widely used among GitLab users.\n\n**Daniel:** I am definitely excited to participate in some of our big initiatives like [serverless](/topics/serverless/) and PaaS. In the near future, you can look forward to group-level Kubernetes clusters as well as some great Auto DevOps improvements like the ability to initialize and migrate databases.\n\n### Anything else you want to share?\n\n**Daniel:** We're always looking for [engineers](/jobs/). Familiarity with Kubernetes and expertise with Ruby will help you land an interview.\n\n**Dylan:** Try out [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) and don't be afraid to create issues for us if you run into trouble. 
We love hearing from our users!\n\nCover image by [rawpixel](https://unsplash.com/@rawpixel) on [Unsplash](https://unsplash.com/).\n{: .note}\n",[749,1347],{"slug":6733,"featured":6,"template":678},"configure-post","content:en-us:blog:configure-post.yml","Configure Post","en-us/blog/configure-post.yml","en-us/blog/configure-post",{"_path":6739,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6740,"content":6746,"config":6752,"_id":6754,"_type":16,"title":6755,"_source":17,"_file":6756,"_stem":6757,"_extension":20},"/en-us/blog/gitlab-hackerone-bug-bounty-program-is-public-today",{"title":6741,"description":6742,"ogTitle":6741,"ogDescription":6742,"noIndex":6,"ogImage":6743,"ogUrl":6744,"ogSiteName":692,"ogType":693,"canonicalUrls":6744,"schema":6745},"GitLab's HackerOne Bug Bounty Program is public today","With 200 reported vulnerabilities and $200,000 awarded already, our bug bounty program is now public and open for your contributions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666816/Blog/Hero%20Images/security-cover.png","https://about.gitlab.com/blog/gitlab-hackerone-bug-bounty-program-is-public-today","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's HackerOne Bug Bounty Program is public today\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kathy Wang\"}],\n        \"datePublished\": \"2018-12-12\",\n      }",{"title":6741,"description":6742,"authors":6747,"heroImage":6743,"date":6749,"body":6750,"category":14,"tags":6751},[6748],"Kathy Wang","2018-12-12","\nToday, we are happy to announce that our [HackerOne bug bounty program](https://hackerone.com/gitlab) is now public. Since we opened our private bounty program in December 2017, we have been preparing to take this program public by working through some of the challenges of managing a bug bounty program. We have awarded over $200,000 in bounties since the bug bounty program went live last year. This means we mitigated nearly 200 vulnerabilities reported to us.\n\nOur first response time to newly submitted findings has decreased significantly, from an average of 48+ hours to just seven. That is a significant reduction achieved through security automation, and will help us scale, as well as better engage the hacker community.\n\nOn average, our mean time to mitigation (MTTR) for critical security issues is currently fewer than 30 days. Our current goal is to now focus on bringing the MTTR metric for medium-high security issues to under 60 days, on average.\n\nYesterday, we released a [webinar](https://www.hackerone.com/resources/gitlab-hps-for-startups) to announce our plans to be a public bug bounty program. In managing a [public bug bounty program](https://hackerone.com/gitlab), we will now be able to reward our hacker community for reporting security vulnerabilities to us directly through the program.\n\nThe past year has been a great journey of learning about managing such a program, and we have plans to further expand upon our public program in 2019 and beyond. 
We would also like to acknowledge some of our top contributors from the hacker community, including [ngalog](https://hackerone.com/ngalog), [jobert](https://hackerone.com/jobert), and [fransrosen](https://hackerone.com/fransrosen).\n\nCheck out the [program](https://hackerone.com/gitlab) to see how you can contribute!\n",[268,1307],{"slug":6753,"featured":6,"template":678},"gitlab-hackerone-bug-bounty-program-is-public-today","content:en-us:blog:gitlab-hackerone-bug-bounty-program-is-public-today.yml","Gitlab Hackerone Bug Bounty Program Is Public Today","en-us/blog/gitlab-hackerone-bug-bounty-program-is-public-today.yml","en-us/blog/gitlab-hackerone-bug-bounty-program-is-public-today",{"_path":6759,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6760,"content":6766,"config":6772,"_id":6774,"_type":16,"title":6775,"_source":17,"_file":6776,"_stem":6777,"_extension":20},"/en-us/blog/friends-dont-let-friends-add-options-to-code",{"title":6761,"description":6762,"ogTitle":6761,"ogDescription":6762,"noIndex":6,"ogImage":6763,"ogUrl":6764,"ogSiteName":692,"ogType":693,"canonicalUrls":6764,"schema":6765},"Friends don't let friends add options to code","Creating optional features burdens users and applications – here's how we avoid adding options.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678953/Blog/Hero%20Images/options.jpg","https://about.gitlab.com/blog/friends-dont-let-friends-add-options-to-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Friends don't let friends add options to code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-12-10\",\n      }",{"title":6761,"description":6762,"authors":6767,"heroImage":6763,"date":6769,"body":6770,"category":14,"tags":6771},[6768],"Suri Patel","2018-12-10","\nSometimes, when trying to make it easier to work in an application, our instinct is to add\noptional features that users can enable if their situations require a specific functionality.\nOur intentions may be good, but these actions can actually cause _more_ problems, since we invite users\n to second-guess their choices by adding extra steps into the user experience.\n\n## The disadvantages of a [choose your own adventure](https://en.wikipedia.org/wiki/Choose_Your_Own_Adventure) model\n\nOne of the most celebrated aspects of [open source](/solutions/open-source/)\nis the freedom that allows developers to brighten a user’s day by adding an\noptional feature that may not be for everyone, but allows a small portion of users\nto engage with a project in a specific way. While it may seem like a great idea\nto cater to individual needs, there are several disadvantages to making something\nan option.\n\n### It creates more work for developers\n\nCreating extra options means more work for both frontend and backend teams.\nThese features add additional code, tests, and documentation for each setting,\nand the various states alter the UI. Adding options hurts you in every step of\nthe development process.\n\n### It places a burden on the user to choose\n\nWhen we solve problems by including options, we force a user to think about the\nfunction and consider its purpose and drawbacks, placing a burden on them to\ncontrol how they use an application. A user hesitates and has to make a decision\nabout whether this is something that should be enabled. 
After all, if an option\nsignificantly enhanced the user experience, then wouldn’t it have been automatically\nintegrated?\n\n### It makes future functionality more difficult to implement\n\nThere's also the long-term impact of additional options. Just one extra option creates two\npossible paths, each of which might influence other parts of an application. So, every\ntime we add an option, the number of states of the application doubles. That's\nexponential growth (an application with just 10 independent boolean options already has 2^10 = 1,024 possible states), and it adds up quickly, making it harder to diagnose errors. Multiple\noptions can lead to the creation of states of which we’re unaware, so\nit’s harder for the user to understand how an application should behave, because\nthey don't know whether errors are due to an option or not. And, if it is an\noption causing the error, _which_ option is the problem?\n\n## How we avoid adding options: Bask in the glow of iteration\n\nSo, how do you know if a feature should be optional or not? At GitLab, we ship\nthe first [iteration](https://handbook.gitlab.com/handbook/values/#iteration) and keep delivering based on\nuser feedback. Some of the features that we anticipated may never roll out,\nbecause users didn’t request them. Iteration allows us to reduce the scope of\ndevelopment and avoid including features that aren’t popular or usable.\n\nWhenever users need something new, try to create a solution that's acceptable\nto the greatest number of people. Rely on your development and operations teams to\nprovide feedback and ask them to relate to the end user. Conducting\n[UX research](/handbook/product/ux/ux-research/#ux-research) with your users\nalso helps identify pain points and needs.\n\nTeams are continually constrained by development capacity, and adding options to\napplications can absorb precious time and effort. We suggest shipping your\napplication without an option and waiting to see whether people request it or\nmake a\n[feature proposal](https://gitlab.com/gitlab-org/gitlab-ce/issues?label_name%5B%5D=feature+proposal)\nfor it. 
In the end, our role is to solve users’ problems, and our goal is to\nidentify the underlying cause of a challenge and fix it in a way that doesn't\nneed an option.\n\n[Cover image](https://unsplash.com/photos/pKeF6Tt3c08) by [Brendan Church](https://unsplash.com/@bdchu614) on Unsplash\n{: .note}\n",[915,1144,703,727],{"slug":6773,"featured":6,"template":678},"friends-dont-let-friends-add-options-to-code","content:en-us:blog:friends-dont-let-friends-add-options-to-code.yml","Friends Dont Let Friends Add Options To Code","en-us/blog/friends-dont-let-friends-add-options-to-code.yml","en-us/blog/friends-dont-let-friends-add-options-to-code",{"_path":6779,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6780,"content":6785,"config":6789,"_id":6791,"_type":16,"title":6792,"_source":17,"_file":6793,"_stem":6794,"_extension":20},"/en-us/blog/git-protocol-v2-enabled-for-ssh-on-gitlab-dot-com",{"title":6781,"description":6782,"ogTitle":6781,"ogDescription":6782,"noIndex":6,"ogImage":2478,"ogUrl":6783,"ogSiteName":692,"ogType":693,"canonicalUrls":6783,"schema":6784},"Git Protocol v2 now enabled for SSH on GitLab.com","Fetch faster using Git Protocol v2 – here's how.","https://about.gitlab.com/blog/git-protocol-v2-enabled-for-ssh-on-gitlab-dot-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git Protocol v2 now enabled for SSH on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"James Ramsay\"}],\n        \"datePublished\": \"2018-12-10\",\n      }",{"title":6781,"description":6782,"authors":6786,"heroImage":2478,"date":6769,"body":6787,"category":14,"tags":6788},[6217],"\n\nGitLab added support for [Git Protocol v2 over HTTP and SSH in GitLab 11.4](/releases/2018/10/22/gitlab-11-4-released/#git-protocol-v2), and enabled Protocol v2 over HTTP on GitLab.com, but not for SSH. On Nov. 23, we enabled [Git Protocol v2 over SSH on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5244). You can view Git Protocol usage on our [public dashboard](https://dashboards.gitlab.com/d/pqlQq0xik/git-protocol-versions?refresh=5m&orgId=1).\n\nGit Protocol v2 is supported from Git v2.18.0 and is opt-in. To enable globally, run `git config --global protocol.version 2`.\n\n## What Git Protocol v2 means for you\n\nGit 2.18 introduced support for Protocol v2, which defines how clones, fetches, and pushes are communicated between the client (your computer) and the server (GitLab). The new [wire protocol](https://www.kernel.org/pub/software/scm/git/docs/technical/pack-protocol.html) improves the performance of fetch commands and enables future protocol improvements. 
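If you want to adopt it gradually, the standard `git config` scopes also work here: you can enable Protocol v2 for a single repository, or try it on a single command (a minimal sketch, assuming a remote named `origin`):\n\n```\n# Enable Protocol v2 for the current repository only\ngit config protocol.version 2\n\n# Or use it for one command without changing any configuration\ngit -c protocol.version=2 fetch origin\n```\n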
[Read more about Protocol v2](https://opensource.googleblog.com/2018/05/introducing-git-protocol-version-2.html) in the release post by the author of the change.\n\nTo see the reduction in network traffic with Protocol v2, you can run the commands below:\n\n```\n# Original Git wire protocol\nGIT_TRACE_PACKET=1 git -c protocol.version=0 ls-remote git@gitlab.com:gitlab-org/gitlab-ce.git master\n\n# New Git wire protocol v2\nGIT_TRACE_PACKET=1 git -c protocol.version=2 ls-remote git@gitlab.com:gitlab-org/gitlab-ce.git master\n```\n\nIn moving from Protocol v0 to v2, on this repo the number of lines (\"packets\") sent behind the scenes drops from over 36,000 to fewer than 30.\n",[702,1286,2331],{"slug":6790,"featured":6,"template":678},"git-protocol-v2-enabled-for-ssh-on-gitlab-dot-com","content:en-us:blog:git-protocol-v2-enabled-for-ssh-on-gitlab-dot-com.yml","Git Protocol V2 Enabled For Ssh On Gitlab Dot Com","en-us/blog/git-protocol-v2-enabled-for-ssh-on-gitlab-dot-com.yml","en-us/blog/git-protocol-v2-enabled-for-ssh-on-gitlab-dot-com",{"_path":6796,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6797,"content":6802,"config":6808,"_id":6810,"_type":16,"title":6811,"_source":17,"_file":6812,"_stem":6813,"_extension":20},"/en-us/blog/gitlab-runner-update-required-to-use-auto-devops-and-sast",{"title":6798,"description":6799,"ogTitle":6798,"ogDescription":6799,"noIndex":6,"ogImage":4861,"ogUrl":6800,"ogSiteName":692,"ogType":693,"canonicalUrls":6800,"schema":6801},"GitLab Runner update required to use SAST in Auto DevOps","Make sure you upgrade GitLab Runner to 11.5+ to continue using SAST in Auto DevOps.","https://about.gitlab.com/blog/gitlab-runner-update-required-to-use-auto-devops-and-sast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Runner update required to use SAST in Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabio Busatto\"}],\n        \"datePublished\": \"2018-12-06\",\n      }",{"title":6798,"description":6799,"authors":6803,"heroImage":4861,"date":6805,"body":6806,"category":14,"tags":6807},[6804],"Fabio Busatto","2018-12-06","\n\nWe are introducing a major change to the [SAST] job definition for [Auto DevOps] with **GitLab 11.6**, shipping Dec. 22.\nAs a result, SAST jobs will fail after the upgrade to GitLab 11.6 if they are picked up by a version of [GitLab Runner]\nprior to 11.5. The jobs will fail, but they will not block pipelines. However, you won't see results\nfor SAST in the merge request or at the pipeline level anymore.\n\nThe same change will happen for [Dependency Scanning], [Container Scanning], [DAST], and [License Management] in future releases.\n\n## Why did this happen?\n\nThe [new job definition] uses the [`reports` syntax], which is necessary to show SAST results in the [Group Security Dashboard].\nUnfortunately, this syntax is not supported by GitLab Runner prior to 11.5.\n\n## Who is affected?\n\nYou are affected by this change if you meet **all** the requirements in the following list:\n1. You are using Auto DevOps **AND**\n1. you have at least one GitLab Runner 11.4 or older set up for your projects **AND**\n1. you are interested in security reports.\n\n## Who is not affected?\n\nYou are **not** affected by this change if you meet **at least one** of the requirements in the following list:\n1. You are not using Auto DevOps **OR**\n1. you are using only GitLab Runner 11.5 or newer **OR**\n1. 
you are using only shared runners on GitLab.com (we already upgraded them) **OR**\n1. you are not interested in security reports.\n\n## How to solve the problem\n\nIf you are not affected by the change, you don't need to take any action.\n\nIf you are affected, you should upgrade your GitLab Runners to version 11.5 or newer as soon as possible.\nIf you don't, you will not have new SAST reports until you upgrade. Once you upgrade your runners, SAST will\nstart working correctly again.\n\n## What is the expected timeline?\n\nGitLab 11.6 will be released on **Dec. 22**. This change may also be shipped in an early release\ncandidate (RC) version.\n\nIf you are using a **self-managed** GitLab instance and you don't install RC versions, you will be affected when\nyou upgrade to GitLab 11.6.\n\nIf you are using **GitLab.com**, you will be affected as soon as the RC version with the change is deployed.\n\nFeel free to reach out to us with any further questions!\n\n[SAST]: https://docs.gitlab.com/ee/user/application_security/sast/\n[Auto DevOps]: https://docs.gitlab.com/ee/topics/autodevops/\n[new job definition]: https://docs.gitlab.com/ee/user/application_security/sast/\n[`reports` syntax]: https://docs.gitlab.com/ee/ci/yaml/#artifactsreportssast-ultimate\n[Group Security Dashboard]: https://docs.gitlab.com/ee/user/application_security/security_dashboard/\n[GitLab Runner]: https://docs.gitlab.com/runner/\n[Dependency Scanning]: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/\n[Container Scanning]: https://docs.gitlab.com/ee/user/application_security/container_scanning/\n[DAST]: https://docs.gitlab.com/ee/user/application_security/dast/\n[License Management]: https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html\n",[832,894,749,2331,1307],{"slug":6809,"featured":6,"template":678},"gitlab-runner-update-required-to-use-auto-devops-and-sast","content:en-us:blog:gitlab-runner-update-required-to-use-auto-devops-and-sast.yml","Gitlab Runner Update Required To Use Auto Devops And Sast","en-us/blog/gitlab-runner-update-required-to-use-auto-devops-and-sast.yml","en-us/blog/gitlab-runner-update-required-to-use-auto-devops-and-sast",{"_path":6815,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6816,"content":6822,"config":6829,"_id":6831,"_type":16,"title":6832,"_source":17,"_file":6833,"_stem":6834,"_extension":20},"/en-us/blog/availability-postgres-patroni",{"title":6817,"description":6818,"ogTitle":6817,"ogDescription":6818,"noIndex":6,"ogImage":6819,"ogUrl":6820,"ogSiteName":692,"ogType":693,"canonicalUrls":6820,"schema":6821},"Introducing Patroni as the Postgres Failover Manager on GitLab.com","GitLab.com is introducing Patroni as the Postgres Failover Manager on GitLab.com.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671280/Blog/Hero%20Images/gitlab-gke-integration-cover.png","https://about.gitlab.com/blog/availability-postgres-patroni","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing Patroni as the Postgres Failover Manager on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gerardo Lopez-Fernandez\"}],\n        \"datePublished\": \"2018-12-05\",\n      }",{"title":6817,"description":6818,"authors":6823,"heroImage":6819,"date":6825,"body":6826,"category":14,"tags":6827},[6824],"Gerardo Lopez-Fernandez","2018-12-05","\n\n## Upcoming Maintenance Windows for Patroni Deployment\n\nWe are writing this 
post to let our community know we are planning to perform the work necessary\nto deploy [Patroni](https://github.com/zalando/patroni) as the Postgres Failover Manager on GitLab.com over two weekends: a dry run to test\nour migration plan and tools on Saturday, December 8, 2018, and the actual deployment on Saturday, December\n15, 2018.\n\nDuring the maintenance windows, the following services will be unavailable:\n\n* SaaS website ([GitLab.com](https://gitlab.com/) will be offline, but [about.gitlab.com](https://about.gitlab.com/) and [docs.gitlab.com](https://docs.gitlab.com/) will still be available)\n* Git SSH\n* Git HTTPS\n* Registry\n* CI/CD\n* Pages\n\n### Maintenance Window - Dry run - Saturday, December 8 at 13:00 UTC\n\nWe will perform testing and validation of our deployment procedures and tools during this maintenance\nwindow to do final readiness checks. This maintenance window should last 30 minutes.\n\n### Maintenance Window - Actual Cutover - Saturday, December 15 at 13:00 UTC\n\nOn the day of the cutover, we are planning to start at 13:00 UTC. The time window for GitLab.com to be\nin maintenance is currently planned to be 30 minutes. Should the timing change, we will post updates\non the channels listed below. When this window is complete, GitLab.com will be running Patroni.\n\n* [GitLab Status page](https://status.gitlab.com/)\n* [GitLab Status Twitter](https://twitter.com/gitlabstatus)\n\n",[749,749,1646,6828,1646,1445],"bug bounty",{"slug":6830,"featured":6,"template":678},"availability-postgres-patroni","content:en-us:blog:availability-postgres-patroni.yml","Availability Postgres Patroni","en-us/blog/availability-postgres-patroni.yml","en-us/blog/availability-postgres-patroni",{"_path":6836,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6837,"content":6843,"config":6849,"_id":6851,"_type":16,"title":6852,"_source":17,"_file":6853,"_stem":6854,"_extension":20},"/en-us/blog/microservices-integrated-solution",{"title":6838,"description":6839,"ogTitle":6838,"ogDescription":6839,"noIndex":6,"ogImage":6840,"ogUrl":6841,"ogSiteName":692,"ogType":693,"canonicalUrls":6841,"schema":6842},"Tackling the microservices repository explosion challenge","Microservices have spawned an explosion of dependent projects with multiple repos, creating the need for an integrated solution – we're working on it right now.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662898/Blog/Hero%20Images/microservices-explosion.jpg","https://about.gitlab.com/blog/microservices-integrated-solution","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's raining repos: The microservices repo explosion, and what we're doing about it\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-11-26\",\n      }",{"title":6844,"description":6839,"authors":6845,"heroImage":6840,"date":6846,"body":6847,"category":14,"tags":6848},"It's raining repos: The microservices repo explosion, and what we're doing about it",[6626],"2018-11-26","\nGone are the days of \"set it and forget it\"-style software development. The increased demand for code and operations on all projects, especially [microservices](/topics/microservices/), means more repos. This calls for a more integrated solution to incorporate testing, security updates, monitoring, and more, says GitLab CEO [Sid Sijbrandij](/company/team/#sytses):\n\n>\"The bar's going up for software development. 
It's no longer enough to just write the code; you also have to write the tests. It's no longer enough to just ship it; you also have to monitor it. You can no longer make it once and forget about it; you have to stay current with security updates. For every product you make, you have to integrate more of these tools. It used to be that only the big projects got all these things, but now every single service you ship should have these features, because other projects are dependent on it. One security vulnerability can be enough to take a company down.\"\n\nAn increasing number of project repos means exponential growth in the number of tools needed to handle them – bad news for those saddled with managing project dependencies. A streamlined workflow is essential to alleviate this burden – here's how we want to help you get there.\n\n### Everything under one roof\n\n\"With GitLab, we want to enable you to simply commit your code and have all the tools you need integrated out of the box,\" Sid said. \"You don't have to do anything else. It's monitored; we measure whether your dependencies have a vulnerability and fix it for you automatically. I think that's the big benefit of GitLab; that you don't have to go into stitching together 10 tools for every project that you make.\"\n\nBy using an integrated solution to manage an ever-growing number of microservices, you can avoid having engineers siloed off with their respective teams and tools. Creating visibility among teams and getting rid of the need for handoffs leads to a faster [DevOps lifecycle](/topics/devops/) while also ensuring that your projects deploy and remain stable, Sid explains.\n\n\"Our customers that switched from a fragmented setup and were only able to get projects through that cycle a few times a year are now deploying a few times a week,\" Sid said. \"The ability to go from planning to monitoring it in production is what GitLab brings to the table. We have an ample amount of customer case studies showing how we helped improve their speed.\"\n\n### Better support for microservices\n\nWe are beefing up our support for microservices, and have a number of features in the works to improve this area, including [group level Kubernetes clusters](https://gitlab.com/gitlab-org/gitlab-ce/issues/34758), a [global Docker registry browser](https://gitlab.com/gitlab-org/gitlab-ce/issues/49336), and adding the [ability to define multiple pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/22972). This is to build on what's already there:\n\n\"We have great support for microservices. GitLab has [multi-project pipelines](/blog/use-multiproject-pipelines-with-gitlab-cicd/) and [can trigger pipelines from multi-projects via API](https://docs.gitlab.com/ee/ci/jobs/ci_job_token.html),\" Sid detailed. \"The CI Working Group of the CNCF (Cloud Native Computing Foundation), the most cloud native organization in the world probably, uses GitLab to test their projects. We've got great support for things like [Kubernetes](/solutions/kubernetes/) and cloud native technologies. In GitLab, every project you have can be attached to a Kubernetes cluster, and GitLab uses that to run everything that’s going on. We know that a lot of our users and customers are using microservices, and we work great with them.\"\n\n### Future focus: best-in-class solutions\n\nGitLab is much more than just version control. Having started with the planning, creating and verifying stages in 2011 and 2012, we’ve had time to make those capabilities very strong. 
We are now strengthening our offerings in the other steps of the DevOps lifecycle: managing, packaging, releasing, configuring, monitoring, and securing.\n\n\"We are seeing enormous progress in those areas, but they can't go head to head with the best-in-class solutions just yet. So that's going to be the theme for GitLab next year, to make sure each of our solutions is best in class instead of just the three things we started with,\" Sid says. \"And we won't take our eyes off the ball.\"\n\n[Cover image](https://unsplash.com/photos/wplxPRCF7gA) by [Ruben Bagues](https://unsplash.com/@rubavi78) on Unsplash\n{: .note}\n",[832,232,1002,1307,1328],{"slug":6850,"featured":6,"template":678},"microservices-integrated-solution","content:en-us:blog:microservices-integrated-solution.yml","Microservices Integrated Solution","en-us/blog/microservices-integrated-solution.yml","en-us/blog/microservices-integrated-solution",{"_path":6856,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6857,"content":6862,"config":6868,"_id":6870,"_type":16,"title":6871,"_source":17,"_file":6872,"_stem":6873,"_extension":20},"/en-us/blog/new-elasticsearch-version-requirements",{"title":6858,"description":6859,"ogTitle":6858,"ogDescription":6859,"noIndex":6,"ogImage":4861,"ogUrl":6860,"ogSiteName":692,"ogType":693,"canonicalUrls":6860,"schema":6861},"GitLab 11.5 adds Elasticsearch 6, removes ES 5.5 support","GitLab 11.5 will support Elasticsearch version 6 and 5.6, sunsetting support for versions 5.5 and earlier.","https://about.gitlab.com/blog/new-elasticsearch-version-requirements","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 11.5 to support Elasticsearch 6, sunset support for Elasticsearch 5.5\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mario de la Ossa\"}],\n        \"datePublished\": \"2018-11-16\",\n      }",{"title":6863,"description":6859,"authors":6864,"heroImage":4861,"date":6865,"body":6866,"category":14,"tags":6867},"GitLab 11.5 to support Elasticsearch 6, sunset support for Elasticsearch 5.5",[6454],"2018-11-16","\nIn GitLab 11.5 (to be released on Nov. 22, 2018), GitLab's [Elasticsearch integration](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html)\nwill support Elasticsearch version 6, and will no longer support versions 5.5 or earlier.\nPlease make plans to upgrade Elasticsearch to version 5.6 or 6.x before upgrading to GitLab 11.5. After you upgrade GitLab, you will also need\nto perform a [reindex](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html),\nas the changes required to support these Elasticsearch versions are incompatible with the indexes of previous versions.\n\nIn summary, starting with 11.5, GitLab will support:\n- Elasticsearch version 5.6\n- Elasticsearch version 6.x\n\nIf you are using GitLab.com, this does not impact you in any way. 
This is only relevant\nfor [self-managed GitLab](/pricing/#self-managed).\n{: .alert .alert-info}\n\nGitLab uses Elasticsearch for [Advanced Global Search](https://docs.gitlab.com/ee/user/search/advanced_search.html)\nand [Advanced Syntax Search](https://docs.gitlab.com/ee/user/search/advanced_search.html).\n\n## Why are we doing this?\n\nElasticsearch version 6 brings with it two large changes that are incompatible with the way we currently index:\n\n- The [removal of mapping types](https://www.elastic.co/guide/en/elasticsearch/reference/6.x/removal-of-types.html).\n- Parent-child relationships are now established via a [`join` datatype](https://www.elastic.co/guide/en/elasticsearch/reference/6.0/parent-join.html).\n\nWe'll go into some detail on how each of these changes affects GitLab.\n\n### Removal of mapping types\n\nIn Elasticsearch 6, all documents under the same index must be of the same 'type.' We need to keep all documents under the same index\nin order to be able to query based on project membership and permissions, so this change forced us to implement our own\n`type` field in order to still be able to query only a single type (for example, issues).\n\nThis removal of mapping types also affected [the way parent-child relationships work](https://www.elastic.co/guide/en/elasticsearch/reference/6.x/removal-of-types.html#_parent_child_without_mapping_types).\n\n### `join` datatype\n\nWith the mapping type change comes a change to the way parent-child relationships\nare expressed. Elasticsearch 5.6 and 6.x introduced a [`join` datatype](https://www.elastic.co/guide/en/elasticsearch/reference/6.0/parent-join.html)\nthat GitLab 11.5 puts to use. (As of 6.0, it is the required method for defining these relationships.)\n\nWhen using `join`, all insertions and deletions must be routed relative to their\nparent – which means we must send the parent's ID in the `routing` field. 
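To make the routing requirement concrete, here is a minimal sketch against a local Elasticsearch 6.x node (the `demo` index and the `project`/`issue` relation names are hypothetical, not GitLab's actual mapping):\n\n```\n# Create an index whose mapping contains a join field\ncurl -XPUT 'localhost:9200/demo' -H 'Content-Type: application/json' -d '\n{\"mappings\": {\"doc\": {\"properties\": {\n  \"relation\": {\"type\": \"join\", \"relations\": {\"project\": \"issue\"}}}}}}'\n\n# Index a parent document\ncurl -XPUT 'localhost:9200/demo/doc/1' -H 'Content-Type: application/json' -d '\n{\"name\": \"my-project\", \"relation\": \"project\"}'\n\n# Index a child document, routed by its parent ID\ncurl -XPUT 'localhost:9200/demo/doc/2?routing=1' -H 'Content-Type: application/json' -d '\n{\"title\": \"first issue\", \"relation\": {\"name\": \"issue\", \"parent\": \"1\"}}'\n```\n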
In 5.6,\nthis means that the `_parent` field is ignored, and in 6.x it is removed.\n\n### Why Elasticsearch 5.6 remains compatible\n\nAs noted in the [schedule for removal of mapping types](https://www.elastic.co/guide/en/elasticsearch/reference/6.x/removal-of-types.html#_schedule_for_removal_of_mapping_types),\nversion 5.6 is the first Elasticsearch version where the `join` datatype is available, as well as the first version where `single_type`\nbehavior can be enabled.\n\nWe tested versions 5.5 and below, and unfortunately they have no support for `join` datatypes, so we need to end support for these versions as of GitLab 11.5.\n\nWe're especially looking forward to supporting Elasticsearch version 6 as it brings with it some great [improvements](https://www.elastic.co/blog/elasticsearch-6-0-0-released), including:\n\n- Major improvements for sparsely populated fields\n- Faster query times with sorted indices\n- Search scalability across shards\n",[749,2331],{"slug":6869,"featured":6,"template":678},"new-elasticsearch-version-requirements","content:en-us:blog:new-elasticsearch-version-requirements.yml","New Elasticsearch Version Requirements","en-us/blog/new-elasticsearch-version-requirements.yml","en-us/blog/new-elasticsearch-version-requirements",{"_path":6875,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6876,"content":6882,"config":6887,"_id":6889,"_type":16,"title":6890,"_source":17,"_file":6891,"_stem":6892,"_extension":20},"/en-us/blog/how-we-spent-two-weeks-hunting-an-nfs-bug",{"title":6877,"description":6878,"ogTitle":6877,"ogDescription":6878,"noIndex":6,"ogImage":6879,"ogUrl":6880,"ogSiteName":692,"ogType":693,"canonicalUrls":6880,"schema":6881},"How we spent two weeks hunting an NFS bug in the Linux kernel","Here's an in-depth recap of debugging a GitLab issue that culminated in a patch for the Linux kernel.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672173/Blog/Hero%20Images/nfs-bug-hunt-detective.jpg","https://about.gitlab.com/blog/how-we-spent-two-weeks-hunting-an-nfs-bug","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we spent two weeks hunting an NFS bug in the Linux kernel\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2018-11-14\",\n      }",{"title":6877,"description":6878,"authors":6883,"heroImage":6879,"date":6884,"body":6885,"category":14,"tags":6886},[670],"2018-11-14","\n\nUPDATE 2019-08-06: This bug has now been resolved in the following\ndistributions:\n\n* [Red Hat Enterprise Linux 7](https://access.redhat.com/errata/RHSA-2019:2029)\n* [Ubuntu](https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1802585)\n* Linux mainline: Backported to [4.14-stable](https://lkml.org/lkml/2019/8/2/562) and [4.19-stable](https://lkml.org/lkml/2019/8/2/639)\n\nOn Sep. 14, the GitLab support team escalated a critical\nproblem encountered by one of our customers: GitLab would run fine for a\nwhile, but after some time users encountered errors. When attempting to\nclone certain repositories via Git, users would see an opaque `Stale\nfile error` message. The error message persisted for a long time,\nblocking employees from being able to work, unless a system\nadministrator intervened manually by running `ls` in the directory\nitself.\n\nThus launched an investigation into the inner workings of Git and the\nNetwork File System (NFS). 
The investigation uncovered a bug with the\nLinux v4.0 NFS client and culminated with a [kernel patch that was written by\nTrond Myklebust](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=be189f7e7f03de35887e5a85ddcf39b91b5d7fc1)\nand [merged in the latest mainline Linux kernel](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=c7a2c49ea6c9eebbe44ff2c08b663b2905ee2c13)\non Oct. 26.\n\nThis post describes the journey of investigating the issue and\ndetails the thought process and tools by which we tracked down the\nbug. It was inspired by the fine detective work in [How I spent two\nweeks hunting a memory leak in Ruby](http://www.be9.io/2015/09/21/memory-leak/)\nby Oleg Dashevskii.\n\nMore importantly, this experience exemplifies how open source software\ndebugging has become a team sport that involves expertise across\nmultiple people, companies, and locations. The GitLab motto \"[everyone can\ncontribute](/company/mission/#mission)\" applies not only to GitLab itself, but also to other open\nsource projects, such as the Linux kernel.\n\n## Reproducing the bug\n\nWhile we have run NFS on GitLab.com for many years, we have stopped\nusing it to access repository data across our application\nmachines. Instead, we have [abstracted all Git calls to\nGitaly](/blog/the-road-to-gitaly-1-0/).\nStill, NFS remains a supported configuration for our customers who\nmanage their own installation of GitLab, but we had never seen the exact\nproblem described by the customer before.\n\n[Our customer gave us a few important clues](https://gitlab.com/gitlab-org/gitlab-ce/issues/51437):\n\n1. The full error message read, `fatal: Couldn't read ./packed-refs: Stale file handle`.\n2. The error seemed to start when they started a manual Git garbage\ncollection run via `git gc`.\n3. The error would go away if a system administrator ran `ls` in the\ndirectory.\n4. The error also would go away after the `git gc` process ended.\n\nThe first two items seemed obviously related. When you push to a branch\nin Git, Git creates a loose reference, a fancy name for a file that\npoints your branch name to the commit. For example, a push to `master`\nwill create a file called `refs/heads/master` in the repository:\n\n```bash\n$ cat refs/heads/master\n2e33a554576d06d9e71bfd6814ee9ba3a7838963\n```\n\n`git gc` has several jobs, but one of them is to collect these loose\nreferences (refs) and bundle them up into a single file called\n`packed-refs`. This makes things a bit faster by eliminating the need to\nread lots of little files in favor of reading one large one. For\nexample, after running `git gc`, an example `packed-refs` might look\nlike:\n\n```\n# pack-refs with: peeled fully-peeled sorted\n564c3424d6f9175cf5f2d522e10d20d781511bf1 refs/heads/10-8-stable\nedb037cbc85225261e8ede5455be4aad771ba3bb refs/heads/11-0-stable\n94b9323033693af247128c8648023fe5b53e80f9 refs/heads/11-1-stable\n2e33a554576d06d9e71bfd6814ee9ba3a7838963 refs/heads/master\n```\n\nHow exactly is this `packed-refs` file created? To answer that, we ran\n`strace git gc` with a loose ref present. 
Here are the pertinent lines\nfrom that:\n\n```\n28705 open(\"/tmp/libgit2/.git/packed-refs.lock\", O_RDWR|O_CREAT|O_EXCL|O_CLOEXEC, 0666) = 3\n28705 open(\".git/packed-refs\", O_RDONLY) = 3\n28705 open(\"/tmp/libgit2/.git/packed-refs.new\", O_RDWR|O_CREAT|O_EXCL|O_CLOEXEC, 0666) = 4\n28705 rename(\"/tmp/libgit2/.git/packed-refs.new\", \"/tmp/libgit2/.git/packed-refs\") = 0\n28705 unlink(\"/tmp/libgit2/.git/packed-refs.lock\") = 0\n```\n\nThe system calls showed that `git gc` did the following:\n\n1. Open `packed-refs.lock`. This tells other processes that `packed-refs` is locked and cannot be changed.\n1. Open `packed-refs.new`.\n1. Write loose refs to `packed-refs.new`.\n1. Rename `packed-refs.new` to `packed-refs`.\n1. Remove `packed-refs.lock`.\n1. Remove loose refs.\n\nThe fourth step is the key here: the rename where Git puts `packed-refs`\ninto action. In addition to collecting loose refs, `git gc` also\nperforms a more expensive task of scanning for unused objects and\nremoving them. This task can take over an hour for large\nrepositories.\n\nThat made us wonder: for a large repository, does `git gc` keep the file\nopen while it's running this sweep? Looking at the `strace` logs and\nprobing the process with `lsof`, we found that it did the following:\n\n![Git Garbage Collection](https://about.gitlab.com/images/blogimages/nfs-debug/git-gc-diagram.svg)\n\nNotice that `packed-refs` is closed only at the end, after the potentially\nlong `Garbage collect objects` step takes place.\n\nThat made us wonder: how does NFS behave when one node has `packed-refs`\nopen while another renames over that file?\n\nTo experiment, we asked the customer to run the following experiment on\ntwo different machines (Alice and Bob):\n\n1. On the shared NFS volume, create two files: `test1.txt` and\n`test2.txt` with different contents to make it easy to distinguish them:\n\n    ```bash\n    alice $ echo \"1 - Old file\" > /path/to/nfs/test1.txt\n    alice $ echo \"2 - New file\" > /path/to/nfs/test2.txt\n    ```\n\n2. On machine Alice, keep a file open to `test1.txt`:\n\n    ```bash\n     alice $ irb\n     irb(main):001:0> File.open('/path/to/nfs/test1.txt')\n    ```\n\n3. On machine Alice, show the contents of `test1.txt` continuously:\n\n    ```bash\n    alice $ while true; do cat test1.txt; done\n    ```\n\n4. Then on machine Bob, run:\n\n    ```bash\n    bob $ mv -f test2.txt test1.txt\n    ```\n\nThis last step emulates what `git gc` does with `packed-refs` by\noverwriting the existing file.\n\nOn the customer's machine, the result looked something like:\n\n```\n1 - Old file\n1 - Old file\n1 - Old file\ncat: test1.txt: Stale file handle\n```\n\nBingo! We seemed to reproduce the problem in a controlled way. However,\nthe same experiment using a Linux NFS server did not have this\nproblem. The result was what you would expect: the new contents were\npicked up after the rename:\n\n```\n1 - Old file\n1 - Old file\n1 - Old file\n2 - New file  \u003C--- RENAME HAPPENED\n2 - New file\n2 - New file\n```\n\nWhy the difference in behavior? It turns out that the customer was using\nan [Isilon NFS\nappliance](https://www.dellemc.com/en-us/storage/isilon/index.htm) that\nonly supported NFS v4.0. 
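To emulate that constraint against a stock Linux NFS server, a client can be pinned to NFS v4.0 at mount time (a sketch; the server name and export path are hypothetical):\n\n```\nsudo mount -t nfs4 -o vers=4.0 nfs-server:/export /mnt/nfs-test\n\n# or persistently, via a line in /etc/fstab:\n# nfs-server:/export  /mnt/nfs-test  nfs4  vers=4.0  0  0\n```\n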
By switching the mount parameters to v4.0 via\nthe `vers=4.0` parameter in `/etc/fstab`, the test revealed a different\nresult with the Linux NFS server:\n\n```\n1 - Old file\n1 - Old file\n1 - Old file\n1 - Old file \u003C--- RENAME HAPPENED\n1 - Old file\n1 - Old file\n```\n\nInstead of a `Stale file handle`, the Linux NFS v4.0 server showed stale\n*contents*. It turns out this difference in behavior can be explained by\nthe NFS spec. From [RFC\n3010](https://tools.ietf.org/html/rfc3010#page-153):\n\n> A filehandle may or may not become stale or expire on a rename.\n> However, server implementors are strongly encouraged to attempt to keep\n> file handles from becoming stale or expiring in this fashion.\n\nIn other words, NFS servers can choose how to behave if a file is\nrenamed; it's perfectly valid for any NFS server to return a `Stale file\nerror` when that happens. We surmised that even though the results were\ndifferent, the problem was likely related to the same issue. We\nsuspected some cache validation issue because running `ls` in the\ndirectory would \"clear\" the error. Now that we had a reproducible test\ncase, we asked the experts: the Linux NFS maintainers.\n\n## False path: NFS server delegations\n\nWith a clear set of reproduction steps, I [sent an email to the Linux\nNFS mailing list](https://marc.info/?l=linux-nfs&m=153721785231614&w=2)\ndescribing what we had found. Over the week, I went back and forth with\nBruce Fields, the Linux NFS server maintainer, who suggested this was\nan NFS bug and that it would be useful to look at the network traffic. He\nthought there might be an issue with NFS server delegations.\n\n### What is an NFS server delegation?\n\nIn a nutshell, NFS v4 introduced server delegations as a way to speed up file access. A server can\ndelegate read or write access to a client so that the client doesn't\nhave to keep asking the server whether that file has been changed by another\nclient. In simpler terms, a write delegation is akin to someone lending\nyou a notebook and saying, \"Go ahead and write in here, and I'll take it\nback when I'm ready.\" Instead of having to ask to borrow the notebook\nevery time you want to write a new paragraph, you have free rein until\nthe owner reclaims the notebook. In NFS terms, this reclamation process\nis called a delegation recall.\n\nIndeed, a bug in the NFS delegation recall might explain the `Stale file\nhandle` problem. Remember that in the earlier experiment, Alice had\nan open file to `test1.txt` when it was replaced by `test2.txt` later.\nIt's possible that the server failed to recall the delegation on\n`test1.txt`, resulting in an incorrect state. To check whether this was\nan issue, we turned to `tcpdump` to capture NFS traffic and used\nWireshark to visualize it.\n\n[Wireshark](https://www.wireshark.org/) is a wonderful open source tool\nfor analyzing network traffic, and it's especially good for viewing NFS\nin action. We captured a trace using the following command on the NFS server:\n\n```\ntcpdump -s 0 -w /tmp/nfs.pcap port 2049\n```\n\nThis command captures all NFS traffic, which typically is on TCP port 2049.\nBecause our experiment worked properly with NFS v4.1 but did not\nwith NFS v4.0, we could compare and contrast how NFS behaved\nin a non-working and a working case. 
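As an aside, the same capture can also be skimmed from the terminal with `tshark`, Wireshark's command-line companion (a sketch, reading the capture file produced above):\n\n```\n# Print one summary line per NFS message in the capture\ntshark -r /tmp/nfs.pcap -Y nfs\n```\n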
With Wireshark, we saw the\nfollowing behavior:\n\n### NFS v4.0 (stale file case)\n\n![NFS v4.0 flow](https://about.gitlab.com/images/blogimages/nfs-debug/nfs-4.0-flow.svg)\n\nIn this diagram, we can see in step 1 that Alice opens `test1.txt` and gets\nback an NFS file handle along with a `stateid` of 0x3000. When Bob\nattempts to rename the file, the NFS server tells Bob to retry via\nthe `NFS4ERR_DELAY` message while it recalls the delegation from Alice\nvia the `CB_RECALL` message (step 3). Alice then returns her delegation\nvia `DELEGRETURN` (step 4), and then Bob attempts to send another\n`RENAME` message (step 5). The `RENAME` completes in both cases, but\nAlice continues to read using the same file handle.\n\n### NFS v4.1 (working case)\n\n![NFS v4.1 flow](https://about.gitlab.com/images/blogimages/nfs-debug/nfs-4.1-flow.svg)\n\nThe main difference happens at the bottom at step 6. Notice in NFS v4.0\n(the stale file case), Alice attempts to reuse the same `stateid`. In\nNFS v4.1 (working case), Alice performs an additional `LOOKUP` and\n`OPEN`, which causes the server to return a different `stateid`. In v4.0,\nthese extra messages are never sent. This explains why Alice continues\nto see stale content: she is still using the old file handle.\n\nWhat makes Alice decide to do the extra `LOOKUP`? The delegation recall\nseemed to work fine, but perhaps there was still an issue, such as a\nmissing invalidation step. To rule that out, we disabled NFS delegations\nby issuing this command on the NFS server itself:\n\n```sh\necho 0 > /proc/sys/fs/leases-enable\n```\n\nWe repeated the experiment, but the problem persisted. All this\nconvinced us this wasn't an NFS server issue or a problem with NFS\ndelegations; the problem led us to look into the NFS client\nwithin the kernel.\n\n## Digging deeper: the Linux NFS client\n\nThe first question we had to answer for the NFS maintainers:\n\n### Was this problem still in the latest upstream kernel?\n\nThe issue occurred with both CentOS 7.2 and Ubuntu 16.04 kernels, which\nused versions 3.10.0-862.11.6 and 4.4.0-130, respectively. However, both\nthose kernels lagged behind the most recent kernel, which was 4.19-rc2 at the\ntime.\n\nWe deployed a new Ubuntu 16.04 virtual machine on Google Cloud Platform\n(GCP), cloned the latest Linux kernel, and set up a kernel development\nenvironment. After generating a `.config` file via `make menuconfig`, we\nchecked two items:\n\n1. The NFS driver was compiled as a module (`CONFIG_NFSD=m`).\n2. The [required GCP kernel settings](https://cloud.google.com/compute/docs/images/building-custom-os)\nwere set properly.\n\nJust as a geneticist would use fruit flies to study evolution in\nreal time, the first item allowed us to make quick changes in the NFS\nclient without having to reboot the kernel. The second item was required\nto ensure that the kernel would actually boot after it was\ninstalled. Fortunately, the default kernel settings had all the settings\nright out of the box.\n\nWith our custom kernel, we verified that the stale file problem still\nexisted in the latest version. That raised a number of questions:\n\n1. Where exactly was this problem happening?\n2. Why was this problem happening with NFS v4.0 but not in v4.1?\n\nTo answer these questions, we began to investigate the NFS [source\ncode](/solutions/source-code-management/). Since we didn't have a kernel debugger available, we sprinkled the\nsource code with two main types of calls:\n\n1. 
`pr_info()` ([what used to be `printk`](https://lwn.net/Articles/487437/)).\n2. `dump_stack()`: This would show the stack trace of the current function call.\n\nFor example, one of the first things we did was hook into the\n`nfs4_file_open()` function in `fs/nfs/nfs4file.c`:\n\n```c\nstatic int\nnfs4_file_open(struct inode *inode, struct file *filp)\n{\n...\n        pr_info(\"nfs4_file_open start\\n\");\n        dump_stack();\n```\n\nAdmittedly, we could have [activated the `dprintk` messages with the\nLinux dynamic\ndebug](https://www.kernel.org/doc/html/v4.15/admin-guide/dynamic-debug-howto.html)\nor used\n[`rpcdebug`](https://www.thegeekdiary.com/how-to-enable-nfs-debug-logging-using-rpcdebug/),\nbut it was nice to be able to add our own messages to verify changes\nwere being made.\n\nEvery time we made changes, we recompiled the module and reinstalled it\ninto the kernel via the commands:\n\n```sh\nmake modules\nsudo umount /mnt/nfs-test\nsudo rmmod nfsv4\nsudo rmmod nfs\nsudo insmod fs/nfs/nfs.ko\nsudo mount -a\n```\n\nWith our NFS module installed, repeating the experiments would print\nmessages that would help us understand the NFS code a bit more. For\nexample, you can see exactly what happens when an application calls `open()`:\n\n```\nSep 24 20:20:38 test-kernel kernel: [ 1145.233460] Call Trace:\nSep 24 20:20:38 test-kernel kernel: [ 1145.233462]  dump_stack+0x8e/0xd5\nSep 24 20:20:38 test-kernel kernel: [ 1145.233480]  nfs4_file_open+0x56/0x2a0 [nfsv4]\nSep 24 20:20:38 test-kernel kernel: [ 1145.233488]  ? nfs42_clone_file_range+0x1c0/0x1c0 [nfsv4]\nSep 24 20:20:38 test-kernel kernel: [ 1145.233490]  do_dentry_open+0x1f6/0x360\nSep 24 20:20:38 test-kernel kernel: [ 1145.233492]  vfs_open+0x2f/0x40\nSep 24 20:20:38 test-kernel kernel: [ 1145.233493]  path_openat+0x2e8/0x1690\nSep 24 20:20:38 test-kernel kernel: [ 1145.233496]  ? mem_cgroup_try_charge+0x8b/0x190\nSep 24 20:20:38 test-kernel kernel: [ 1145.233497]  do_filp_open+0x9b/0x110\nSep 24 20:20:38 test-kernel kernel: [ 1145.233499]  ? __check_object_size+0xb8/0x1b0\nSep 24 20:20:38 test-kernel kernel: [ 1145.233501]  ? __alloc_fd+0x46/0x170\nSep 24 20:20:38 test-kernel kernel: [ 1145.233503]  do_sys_open+0x1ba/0x250\nSep 24 20:20:38 test-kernel kernel: [ 1145.233505]  ? do_sys_open+0x1ba/0x250\nSep 24 20:20:38 test-kernel kernel: [ 1145.233507]  __x64_sys_openat+0x20/0x30\nSep 24 20:20:38 test-kernel kernel: [ 1145.233508]  do_syscall_64+0x65/0x130\n```\n\nWhat are the `do_dentry_open` and `vfs_open` calls above? Linux has a\n[virtual filesystem\n(VFS)](https://www.kernel.org/doc/Documentation/filesystems/vfs.txt), an\nabstraction layer which provides a common interface for all\nfilesystems. The VFS documentation explains:\n\n> The VFS implements the open(2), stat(2), chmod(2), and similar system\n> calls. The pathname argument that is passed to them is used by the VFS\n> to search through the directory entry cache (also known as the dentry\n> cache or dcache). This provides a very fast look-up mechanism to\n> translate a pathname (filename) into a specific dentry. Dentries live\n> in RAM and are never saved to disc: they exist only for performance.\n\n### This gave us a clue: what if this was a problem with the dentry cache?\n\nWe noticed a lot of dentry cache validation was done in\n`fs/nfs/dir.c`. In particular, `nfs4_lookup_revalidate()` sounded\npromising. 
As an experiment, we hacked that function to bail\nout early:\n\n\n```diff\ndiff --git a/fs/nfs/dir.c b/fs/nfs/dir.c\nindex 8bfaa658b2c1..ad479bfeb669 100644\n--- a/fs/nfs/dir.c\n+++ b/fs/nfs/dir.c\n@@ -1159,6 +1159,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)\n        trace_nfs_lookup_revalidate_enter(dir, dentry, flags);\n        error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);\n        trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);\n+       goto out_bad;\n        if (error == -ESTALE || error == -ENOENT)\n                goto out_bad;\n        if (error)\n```\n\nThat made the stale file problem in our experiment go away! Now we were onto something.\n\nTo answer, \"Why does this problem not happen in NFS v4.1?\", we added\n`pr_info()` calls to every `if` block in that function. After running our\nexperiments with NFS v4.0 and v4.1, we found this special condition being run\nin the v4.1 case:\n\n```c\n        if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1) {\n          goto no_open;\n        }\n```\n\nWhat is `NFS_CAP_ATOMIC_OPEN_V1`? We saw [this kernel\npatch](https://patchwork.kernel.org/patch/2300511/) mentioned this was\nan NFS v4.1-specific feature, and the code in `fs/nfs/nfs4proc.c`\nconfirmed that this flag was a capability present in v4.1 but not in v4.0:\n\n```c\nstatic const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {\n        .minor_version = 1,\n        .init_caps = NFS_CAP_READDIRPLUS\n                | NFS_CAP_ATOMIC_OPEN\n                | NFS_CAP_POSIX_LOCK\n                | NFS_CAP_STATEID_NFSV41\n                | NFS_CAP_ATOMIC_OPEN_V1\n```\n\nThat explained the difference in behavior: in the v4.1 case, the `goto\nno_open` would cause more validation to happen in\n`nfs_lookup_revalidate()`, but in v4.0, the `nfs4_lookup_revalidate()`\nwould return earlier. Now, how do we actually solve the problem?\n\n## The solution\n\nI reported the [findings to the NFS mailing\nlist](https://marc.info/?l=linux-nfs&m=153782129412452&w=2) and proposed\n[a naive patch](https://marc.info/?l=linux-nfs&m=153807208928650&w=2). A\nweek after the report, Trond Myklebust sent a [patch series to the list\nfixing this bug and found another related issue for NFS\nv4.1](https://marc.info/?l=linux-nfs&m=153816500525563&w=2).\n\nIt turns out the fix for the NFS v4.0 bug was deeper in the code base\nthan we had looked. Trond summarized it well in the\n[patch](https://marc.info/?l=linux-nfs&m=153816500525564&w=2):\n\n> We need to ensure that inode and dentry revalidation occurs correctly\n> on reopen of a file that is already open. Currently, we can end up not\n> revalidating either in the case of NFSv4.0, due to the 'cached open'\n> path.  Let's fix that by ensuring that we only do cached open for the\n> special cases of open recovery and delegation return.\n\nWe confirmed that this fix made the stale file problem go away and filed\nbug reports with\n[Ubuntu](https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1802585)\nand [RedHat](https://bugzilla.redhat.com/show_bug.cgi?id=1648482).\n\nKnowing full well that kernel changes may take a while to make it to\nstable releases, we also added a [workaround in\nGitaly](https://gitlab.com/gitlab-org/gitaly/merge_requests/924) to deal\nwith this issue. We did experiments to test that calling `stat()` on the\n`packed-refs` file appears to cause the kernel to revalidate the dentry\ncache for the renamed file. 
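In shell form, the check is roughly the following (a sketch, reusing the hypothetical paths from the Alice/Bob experiment above):\n\n```\n# On Alice: a stat() before each read revalidates the dentry cache\nalice $ while true; do stat test1.txt > /dev/null; cat test1.txt; done\n```\n\nWith the extra `stat()`, the loop should pick up the renamed-over contents instead of returning a stale handle. 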
For simplicity, this is implemented in\nGitaly regardless of whether the filesystem is NFS; we only do this once\nbefore Gitaly \"opens\" a repository, and there are already other `stat()`\ncalls that check for other files.\n\n## What we learned\n\nA bug can be anywhere in your software stack, and sometimes you have to\nlook beyond your application to find it. Having helpful partners in the\nopen source world makes that job much easier.\n\nWe are extremely grateful to Trond Myklebust for fixing the problem, and\nBruce Fields for responding to questions and helping us understand\nNFS. Their responsiveness and professionalism truly reflects the best of\nthe open source community.\n\nPhoto by [dynamosquito](https://www.flickr.com/photos/dynamosquito) on [Flickr](https://www.flickr.com/photos/dynamosquito/4265771518)\n{: .note}\n",[268,702,915,703],{"slug":6888,"featured":6,"template":678},"how-we-spent-two-weeks-hunting-an-nfs-bug","content:en-us:blog:how-we-spent-two-weeks-hunting-an-nfs-bug.yml","How We Spent Two Weeks Hunting An Nfs Bug","en-us/blog/how-we-spent-two-weeks-hunting-an-nfs-bug.yml","en-us/blog/how-we-spent-two-weeks-hunting-an-nfs-bug",{"_path":6894,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6895,"content":6900,"config":6906,"_id":6908,"_type":16,"title":6909,"_source":17,"_file":6910,"_stem":6911,"_extension":20},"/en-us/blog/use-multiproject-pipelines-with-gitlab-cicd",{"title":6896,"description":6897,"ogTitle":6896,"ogDescription":6897,"noIndex":6,"ogImage":2478,"ogUrl":6898,"ogSiteName":692,"ogType":693,"canonicalUrls":6898,"schema":6899},"Multi-project pipelines for streamlined repository workflow","You can connect CI/CD pipelines and artifacts for multiple related projects to make managing interactions easy.","https://about.gitlab.com/blog/use-multiproject-pipelines-with-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to streamline interactions between multiple repositories with multi-project pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabio Busatto\"}],\n        \"datePublished\": \"2018-10-31\",\n      }",{"title":6901,"description":6897,"authors":6902,"heroImage":2478,"date":6903,"body":6904,"category":14,"tags":6905},"How to streamline interactions between multiple repositories with multi-project pipelines",[6804],"2018-10-31","\nModern software products consist of different components and\n[microservices](/topics/microservices/) that work together, relying on many libraries and dependencies:\nbecause of this, many projects cannot be limited to one single repository.\n\nWith [GitLab 9.3](/releases/2017/06/22/gitlab-9-3-released/#multi-project-pipeline-graphs)\nwe released [multi-project pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html)\nto make interactions between different repositories easy to manage. Here's a look at how they work.\n\nNote: these features are available in GitLab [Premium](/pricing/#self-managed), [Gold subscriptions](/pricing/#gitlab-com),\nand public projects on GitLab.com only.\n{: .note}\n\n## What are multi-project pipelines, and how do they help?\n\nMulti-project pipelines span multiple repositories, creating a\nconnection between them. 
But what is technically possible to achieve, and how?\n\n### Start an external pipeline directly from your job\n\nThe most important feature is the ability to trigger an external pipeline\nfrom `gitlab-ci.yml`: using the special variable `$CI_JOB_TOKEN` and the\n[Pipeline Trigger API](https://docs.gitlab.com/ee/api/pipeline_triggers.html),\nyou can start another pipeline in a different project directly from your job,\nwithout setting up any additional authentication token or configuration in the\ntarget project. GitLab automatically detects the user running the caller\npipeline and runs the target one with the same privileges.\n\nThe [`$CI_JOB_TOKEN` variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\nis automatically created when a job starts: it is associated with the user\nthat is running the job, so GitLab is able to enforce permissions when\ndealing with other related projects. It is also very limited in capabilities,\nand it is automatically destroyed as soon as the job ends, to prevent abuse.\n\n### Easily view related pipelines\n\nAnother very useful feature is the ability to see how projects are linked\ntogether directly in the [pipeline graph](https://docs.gitlab.com/ee/ci/pipelines/index.html#pipeline-graphs):\nupstream and downstream stages are rendered as square boxes and connected\nto the main flow. They give you the status of the related pipelines, and you\ncan easily jump to them by clicking the boxes. This feature is also available\nin the pipeline mini-graph that is shown in the Merge Request Widget (this\nfeature was released with [GitLab 9.4](/releases/2017/07/22/gitlab-9-4-released/#mini-graph-for-multi-project-pipelines)).\n\n![Multi-project pipeline graph](https://about.gitlab.com/images/blogimages/multi_project_pipeline_graph.png){: .shadow.center}\n *\u003Csmall>See how upstream and downstream pipelines are shown on both sides of the graph\u003C/small>*\n\n### Download artifacts from another project\n\nYou can also use the `$CI_JOB_TOKEN` variable with the Jobs API in order to\n[download artifacts](https://docs.gitlab.com/ee/api/jobs.html#get-job-artifacts)\nfrom another project. This is very helpful if one of the related pipelines\ncreates a dependency that you need (this has been possible since\n[GitLab 9.5](/releases/2017/08/22/gitlab-9-5-released/#cijobtoken-variable-for-artifacts-api)).\n\n## Why do we need multi-project pipelines?\n\nLet's see how multi-project pipelines can be very useful when dealing\nwith real-life projects.\n\n### Automatically test changes across all connected components\n\nA common development pattern is to have an API provider, a web\nfrontend, and some additional services (bulk data processing, email management,\netc.). Each of these components has its own life in a different repository,\nbut they are strictly connected: a change in one of them should trigger\nbuilds and integration tests in all the related projects in order to check\nthat the changes are not introducing unintended behaviors. 
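For illustration, a job in one component's pipeline could trigger the others with the `$CI_JOB_TOKEN` mechanism described earlier, using nothing more than `curl` (the project ID and ref below are placeholders):\n\n```sh\n# Trigger a pipeline in another project from within a CI job.\n# 1234 is the target project's ID; adjust the ref as needed.\ncurl --request POST \\\n     --form \"token=$CI_JOB_TOKEN\" \\\n     --form \"ref=master\" \\\n     \"https://gitlab.com/api/v4/projects/1234/trigger/pipeline\"\n```\n\n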
Linking those\nprojects with multi-project pipelines automates this task, and users\nwill receive notifications in case of failures.\n\n### Automatically trigger downstream pipelines for packaging\n\nAnother common scenario where multi-project pipelines can be used to simplify\nthe development workflow is packaging and releasing software: every time a\nchange is pushed to the stable branch, a downstream pipeline for the repository\nthat is responsible for packaging the application is triggered automatically.\nThis pipeline can easily fetch the latest artifacts from all the repositories\nthat contain the components of the application and create a Docker image or a\npackage that can be then published and distributed.\n\n## Example application\n\nYou can find [an example application here](https://gitlab.com/gitlab-examples/multi-project-pipelines/).\nIt consists of a Maven package and a command line app that uses it as a dependency.\n\nThe package is built and deployed to the [GitLab Maven Repository](https://docs.gitlab.com/ee/user/packages/maven_repository/index.html),\nthen it triggers a multi-project pipeline to update the entire application.\n\nYou can look at the [upstream](https://gitlab.com/gitlab-examples/multi-project-pipelines/simple-maven-dep/pipelines/33011429)\nand [downstream](https://gitlab.com/gitlab-examples/multi-project-pipelines/simple-maven-app/pipelines/33012000)\npipelines to see how the two projects interact to keep everything up to date.\n\n## Conclusion\n\nMulti-project pipelines are very helpful when dealing with big applications\nthat are not fully contained in a single repository. Existing features allow\nusers to connect them together and automate processes without complex setups.\n\nWe want to continue iterating on multi-project pipelines, and everyone is\ninvited to give feedback on this feature and suggest how we can make it even more\npowerful in the future.\n\n[Cover image](https://unsplash.com/photos/m3TYLFI_mDo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Gerrie van der Walt on [Unsplash](https://unsplash.com/search/photos/pipes?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[110],{"slug":6907,"featured":6,"template":678},"use-multiproject-pipelines-with-gitlab-cicd","content:en-us:blog:use-multiproject-pipelines-with-gitlab-cicd.yml","Use Multiproject Pipelines With Gitlab Cicd","en-us/blog/use-multiproject-pipelines-with-gitlab-cicd.yml","en-us/blog/use-multiproject-pipelines-with-gitlab-cicd",{"_path":6913,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6914,"content":6919,"config":6924,"_id":6926,"_type":16,"title":6927,"_source":17,"_file":6928,"_stem":6929,"_extension":20},"/en-us/blog/setting-up-gitlab-ci-for-android-projects",{"title":6915,"description":6916,"ogTitle":6915,"ogDescription":6916,"noIndex":6,"ogImage":5904,"ogUrl":6917,"ogSiteName":692,"ogType":693,"canonicalUrls":6917,"schema":6918},"Setting up GitLab CI for Android projects","Learn how to set up GitLab CI to ensure your Android app compiles and passes tests.","https://about.gitlab.com/blog/setting-up-gitlab-ci-for-android-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Setting up GitLab CI for Android projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2018-10-24\",\n      
}",{"title":6915,"description":6916,"authors":6920,"heroImage":5904,"date":6921,"body":6922,"category":14,"tags":6923},[4945],"2018-10-24","\nNote: This is a new version of a previously published blog post, updated for the current Android API level (28). Thanks Grayson Parrelli for authoring [the original post](/blog/setting-up-gitlab-ci-for-android-projects/)!\n{: .alert .alert-info}\n\nHave you ever accidentally checked on a typo that broke your Android build or unknowingly broke an important use case with a new change? Continuous integration is a way for developers to avoid these headaches, allowing you to confirm that changes to your app compile, and your tests pass before they're merged in.\n\n[GitLab CI/CD](/solutions/continuous-integration/) is a wonderful [continuous integration](/blog/continuous-integration-delivery-and-deployment-with-gitlab/) built-in solution, and in this post we'll walk through how to set up a basic config file (`.gitlab-ci.yml`) to ensure your Android app compiles and passes unit and functional tests. We assume that you know the process of creating an Android app, can write and run tests locally, and are familiar with the basics of the GitLab UI.\n\n## Our sample project\n\nWe'll be working with a real-world open source Android project called [Materialistic](https://github.com/hidroh/materialistic) to demonstrate how easy it is to get up and running with GitLab CI for Android. Materialistic currently uses Travis CI with GitHub, but switching over is a breeze. If you haven't seen Materialistic before, it's a fantastic open source Android reader for [Hacker News](https://news.ycombinator.com).\n\n### Testing\n\n[Unit tests](https://developer.android.com/training/testing/unit-testing/index.html) are the fundamental tests in your app testing strategy, from which you can verify that the logic of individual units is correct. They are a fantastic way to catch regressions when making changes to your app. They run directly on the Java Virtual Machine (JVM), so you don't need an actual Android device to run them.\n\nIf you already have working unit tests, you shouldn't have to make any adjustments to have them work with GitLab CI. Materialistic uses [Robolectric](http://robolectric.org/) for tests, [Jacoco](https://www.eclemma.org/jacoco/) for coverage, and also has a linting pass. We'll get all of these easily running in our `.gitlab-ci.yml` example except for Jacoco, since that requires a secret token we do not have - though I will show you how to configure that in your own projects.\n\n## Setting up GitLab CI\n\nWe want to be able to configure our project so that our app is built, and it has the complete suite of tests run upon check-in. 
To do so, we have to create our GitLab CI configuration file, called `.gitlab-ci.yml`, and place it in the root of our project.\n\nSo, first things first: If you're just here for a snippet to copy-paste, here is a `.gitlab-ci.yml` that will build and test the Materialistic app:\n\n```yml\nimage: openjdk:8-jdk\n\nvariables:\n  ANDROID_COMPILE_SDK: \"28\"\n  ANDROID_BUILD_TOOLS: \"28.0.2\"\n  ANDROID_SDK_TOOLS:   \"4333796\"\n\nbefore_script:\n  - apt-get --quiet update --yes\n  - apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1\n  - wget --quiet --output-document=android-sdk.zip https://dl.google.com/android/repository/sdk-tools-linux-${ANDROID_SDK_TOOLS}.zip\n  - unzip -d android-sdk-linux android-sdk.zip\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platforms;android-${ANDROID_COMPILE_SDK}\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platform-tools\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"build-tools;${ANDROID_BUILD_TOOLS}\" >/dev/null\n  - export ANDROID_HOME=$PWD/android-sdk-linux\n  - export PATH=$PATH:$PWD/android-sdk-linux/platform-tools/\n  - chmod +x ./gradlew\n  # temporarily disable checking for EPIPE error and use yes to accept all licenses\n  - set +o pipefail\n  - yes | android-sdk-linux/tools/bin/sdkmanager --licenses\n  - set -o pipefail\n\nstages:\n  - build\n  - test\n\nlintDebug:\n  stage: build\n  script:\n    - ./gradlew -Pci --console=plain :app:lintDebug -PbuildDir=lint\n\nassembleDebug:\n  stage: build\n  script:\n    - ./gradlew assembleDebug\n  artifacts:\n    paths:\n    - app/build/outputs/\n\ndebugTests:\n  stage: test\n  script:\n    - ./gradlew -Pci --console=plain :app:testDebug\n```\n\nWell, that's a lot of code! Let's break it down.\n\n### Understanding `.gitlab-ci.yml`\n\n#### Defining the Docker Image\n{:.special-h4}\n\n```yml\nimage: openjdk:8-jdk\n```\n\nThis tells [GitLab Runners](https://docs.gitlab.com/ee/ci/runners/) (the things that are executing our build) what [Docker image](https://hub.docker.com/explore/) to use. If you're not familiar with [Docker](https://hub.docker.com/), the TL;DR is that Docker provides a way to create a completely isolated version of a virtual operating system running in its own [container](https://www.sdxcentral.com/cloud/containers/definitions/what-is-docker-container-open-source-project/). Anything running inside the container thinks it has the whole machine to itself, but in reality there can be many containers running on a single machine. Unlike full virtual machines, Docker containers are super fast to create and destroy, making them great choices for setting up temporary environments for building and testing.\n\nThis [Docker image (`openjdk:8-jdk`)](https://hub.docker.com/_/openjdk/) works perfectly for our use case, as it is just a barebones installation of Debian with Java pre-installed. We then run additional commands further down in our config to make our image capable of building Android apps.\n\n#### Defining variables\n\n```yml\nvariables:\n  ANDROID_COMPILE_SDK: \"28\"\n  ANDROID_BUILD_TOOLS: \"28.0.2\"\n  ANDROID_SDK_TOOLS:   \"4333796\"\n```\n\nThese are variables we'll use throughout our script. They're named to match the properties you would typically specify in your app's `build.gradle`.\n\n- `ANDROID_COMPILE_SDK` is the version of Android you're compiling with. It should match `compileSdkVersion`.\n- `ANDROID_BUILD_TOOLS` is the version of the Android build tools you are using. 
It should match `buildToolsVersion`.\n- `ANDROID_SDK_TOOLS` is a little funny. It's the version of the command line tools we're going to download from the [official site](https://developer.android.com/studio/index.html), so that number really just comes from the latest version available there.\n\n#### Installing packages\n{:.special-h4}\n\n```yml\nbefore_script:\n  - apt-get --quiet update --yes\n  - apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1\n```\n\nThis starts the block of commands that will be run before each job in our config.\n\nThese commands ensure that our package repository listings are up to date, and install the packages we'll be using later on, namely: `wget`, `tar`, `unzip`, and some packages that are necessary to allow 64-bit machines to run Android's 32-bit tools.\n\n#### Installing the Android SDK\n\n```yml\n  - wget --quiet --output-document=android-sdk.zip https://dl.google.com/android/repository/sdk-tools-linux-${ANDROID_SDK_TOOLS}.zip\n  - unzip -d android-sdk-linux android-sdk.zip\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platforms;android-${ANDROID_COMPILE_SDK}\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platform-tools\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"build-tools;${ANDROID_BUILD_TOOLS}\" >/dev/null\n```\n\nHere we're downloading the Android SDK tools from their official location, using our `ANDROID_SDK_TOOLS` variable to specify the version. Afterwards, we're unzipping the tools and running a series of `sdkmanager` commands to install the necessary Android SDK packages that will allow our app to build.\n\n#### Setting up the environment\n\n```yml\n  - export ANDROID_HOME=$PWD/android-sdk-linux\n  - export PATH=$PATH:$PWD/android-sdk-linux/platform-tools/\n  - chmod +x ./gradlew\n  # temporarily disable checking for EPIPE error and use yes to accept all licenses\n  - set +o pipefail\n  - yes | android-sdk-linux/tools/bin/sdkmanager --licenses\n  - set -o pipefail\n```\n\nFinally, we wrap up the `before_script` section of our config with a few remaining tasks. First, we set the `ANDROID_HOME` environment variable to the SDK location, which is necessary for our app to build. Next, we add the platform tools to our `PATH`, allowing us to use the `adb` command without specifying its full path, which is important when we run a downloaded script later. Then, we ensure that `gradlew` is executable, as sometimes Git will mess up permissions.\n\nThe next command, `yes | android-sdk-linux/tools/bin/sdkmanager --licenses`, is responsible for accepting the SDK licenses. Because the Unix `yes` command results in an EPIPE error once the pipe is broken (when `sdkmanager` quits normally), we temporarily disable the `pipefail` option with `set +o pipefail` so that the broken pipe does not terminate script execution.\n\n#### Defining the stages\n\n```yml\nstages:\n  - build\n  - test\n```\n\nHere we're defining the different [stages](https://docs.gitlab.com/ee/ci/yaml/#stages) of our build. We can call these anything we want. A stage can be thought of as a group of [jobs](https://docs.gitlab.com/ee/ci/jobs/). All of the jobs in the same stage happen in parallel, and all jobs in one stage must be completed before the jobs in the subsequent stage begin. We've defined two stages: `build` and `test`. 
They do exactly what you think: the `build` stage ensures the app compiles, and the `test` stage runs our unit and functional tests.\n\n#### Building the app\n\n```yml\nlintDebug:\n  stage: build\n  script:\n    - ./gradlew -Pci --console=plain :app:lintDebug -PbuildDir=lint\n\nassembleDebug:\n  stage: build\n  script:\n    - ./gradlew assembleDebug\n  artifacts:\n    paths:\n    - app/build/outputs/\n```\n\nThis defines the two jobs in our `build` stage: a linter (`lintDebug`) to ensure that the submitted code is up to snuff, and the actual compilation of the code (`assembleDebug`, along with configuration of the `artifacts` that GitLab should expect to find). These jobs run in parallel for maximum efficiency.\n\n#### Running tests\n\n```yml\ndebugTests:\n  stage: test\n  script:\n    - ./gradlew -Pci --console=plain :app:testDebug\n```\n\nThis defines a job called `debugTests` that runs during the `test` stage. Nothing too crazy here - setting up something simple like this is easy!\n\nIf we also wanted to get Jacoco working, that would be very straightforward. Simply adding a section as follows would work - the only additional thing you'd need to do is add a secret variable containing your personal `COVERALLS_REPO_TOKEN`:\n\n```yml\ncoverageTests:\n  stage: test\n  script:\n    - ./gradlew -Pci --console=plain jacocoTestReport coveralls\n```\n\n## Run your new CI setup\n\nAfter you've added your new `.gitlab-ci.yml` file to the root of your repository, just push your changes to the appropriate branch and off you go! You can see your running builds in the **Pipelines** tab of your project. You can even watch your build execute live and see the runner's output, allowing you to debug problems easily.\n\n![Pipelines tab screenshot](https://about.gitlab.com/images/blogimages/gitlab-ci-for-android-2018/tutorial-01.png){:.shadow}\n\nAfter your build is done, you can retrieve your build artifacts:\n\n- First, click on your completed build, then navigate to the Jobs tab:\n\n![Build details button screenshot](https://about.gitlab.com/images/blogimages/gitlab-ci-for-android-2018/tutorial-02.png){:.shadow}\n\nFrom here, simply click on the download button to download your build artifacts.\n\n## Conclusion\n\nSo, there you have it! 
You now know how to create a GitLab CI config that will ensure your app:\n\n- Compiles\n- Passes tests\n- Allows you to access your build artifacts (like your [APK](https://en.wikipedia.org/wiki/Android_application_package)) afterwards.\n\nYou can take a look at my local copy of the Materialistic repository, with everything up and running, at [this link](https://gitlab.com/jyavorska/androidblog-2018)\n\nEnjoy your newfound app stability :)\n\n\u003C!-- closes https://gitlab.com/gitlab-com/www-gitlab-com/issues/3167 -->\n\u003C!-- cover image: https://unsplash.com/photos/aso6SYJZGps -->\n\n\u003Cstyle>\n  img {\n    display: block;\n    margin: 0 auto 20px auto;\n  }\n  .special-h4 {\n    margin-top: 20px !important;\n  }\n\u003C/style>\n",[110,4440],{"slug":6925,"featured":6,"template":678},"setting-up-gitlab-ci-for-android-projects","content:en-us:blog:setting-up-gitlab-ci-for-android-projects.yml","Setting Up Gitlab Ci For Android Projects","en-us/blog/setting-up-gitlab-ci-for-android-projects.yml","en-us/blog/setting-up-gitlab-ci-for-android-projects",{"_path":6931,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6932,"content":6938,"config":6943,"_id":6945,"_type":16,"title":6946,"_source":17,"_file":6947,"_stem":6948,"_extension":20},"/en-us/blog/personas-and-empathy-building",{"title":6933,"description":6934,"ogTitle":6933,"ogDescription":6934,"noIndex":6,"ogImage":6935,"ogUrl":6936,"ogSiteName":692,"ogType":693,"canonicalUrls":6936,"schema":6937},"How we use personas to build empathy for different types of users","Welcome to our series on the new GitLab personas!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678789/Blog/Hero%20Images/how-we-use-personas-to-gain-empathy.jpg","https://about.gitlab.com/blog/personas-and-empathy-building","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use personas to build empathy for different types of users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Katherine Okpara\"}],\n        \"datePublished\": \"2018-10-12\",\n      }",{"title":6933,"description":6934,"authors":6939,"heroImage":6935,"date":6940,"body":6941,"category":14,"tags":6942},[6687],"2018-10-12","\nLast year we discussed our motivations for using personas at GitLab, including [why they're important](/blog/the-importance-of-ux-personas/) and how to [create them through UX research](/blog/discovering-gitlabs-personas/). Since then, our teams have had many conversations about improving the design of our product and continuing to empathize with our users. As a result, we created an initiative to fully incorporate personas into our design process. This will help everyone learn more about the different people who use GitLab!\n\n#### What’s New\nWe’ve made several changes in format since the first iteration of personas:\n\n- Gender-neutral name: humanizing the persona while still ensuring that it is inclusive\n- Job description: helping your audience learn about what the user does and who they work with\n- “Jobs-to-be-done” (JTBD) framework: making the information more concise and easier to digest\n- Alternative job titles: understanding how the research findings apply to other user groups with similar needs and challenges\n\nTypically, the most insightful personas are a realistic representation of user needs. They help you understand who you’re designing for and allow other people in all departments of your company to hear directly from users. 
In this series, we’ll share findings from our recent round of research and highlight what we’ve learned about each role.\n\n#### Want to learn more?\nYou can now view the personas [in our handbook](/handbook/product/personas/). Here's a quick summary of what's inside:\n* [Parker, Product Manager](/handbook/product/personas/#parker-product-manager)\n* [Delaney, Development Team Lead](/handbook/product/personas/#delaney-development-team-lead)\n* [Devon, DevOps Engineer](/handbook/product/personas/#devon-devops-engineer)\n* [Sasha, Software Developer](/handbook/product/personas/#sasha-software-developer)\n* [Sydney, Systems Administrator](/handbook/product/personas/#sidney-systems-administrator)\n* [Sam, Security Analyst](/handbook/product/personas/#sam-security-analyst)\n\nHow does your team use personas in the design process? Connect with us [@gitlab](https://twitter.com/gitlab), and stay tuned for the next posts, where we’ll dive deep into the findings, limitations, and opportunities of each.\n\n[Photo](https://unsplash.com/photos/fgiFAtH0QBU) by [gabrielle cole](https://unsplash.com/@gabriellefaithhenderson) on Unsplash.\n{: .note}\n",[1328,727],{"slug":6944,"featured":6,"template":678},"personas-and-empathy-building","content:en-us:blog:personas-and-empathy-building.yml","Personas And Empathy Building","en-us/blog/personas-and-empathy-building.yml","en-us/blog/personas-and-empathy-building",{"_path":6950,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6951,"content":6957,"config":6961,"_id":6963,"_type":16,"title":6964,"_source":17,"_file":6965,"_stem":6966,"_extension":20},"/en-us/blog/strategies-to-reduce-cycle-times",{"title":6952,"description":6953,"ogTitle":6952,"ogDescription":6953,"noIndex":6,"ogImage":6954,"ogUrl":6955,"ogSiteName":692,"ogType":693,"canonicalUrls":6955,"schema":6956},"10 strategies for cycle time reduction","Engineering leads share strategies on how to speed up cycle times.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668437/Blog/Hero%20Images/faster-cycle-times.jpg","https://about.gitlab.com/blog/strategies-to-reduce-cycle-times","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 strategies for cycle time reduction\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-10-12\",\n      }",{"title":6952,"description":6953,"authors":6958,"heroImage":6954,"date":6940,"body":6959,"category":14,"tags":6960},[6768],"\n\nEvery product manager appreciates shorter cycle times. One way to reduce cycle\ntimes is to learn from others, so five of our engineering leads share the greatest\nchallenges their teams have experienced and offer the strategies they developed\nto speed up iteration.\n\n>“The impact of shorter cycle times is that users can see the result of their\ninput quickly. Instead of contributing to the planning process and then waiting\nfor weeks to see the feature start to take shape, they can regularly see changes,\nmaking them happy and keeping them engaged with a team. 
This also helps reduce\nthe scope creep that happens when a project has been in progress for a while.” – Rachel Nienaber\n\n## What's the average cycle time for development teams?\n\nAccording to the [Accelerate State of DevOps Report](https://www.hatica.io/blog/cycle-time/#:~:text=The%20Accelerate%20State%20of%20DevOps,cycle%20time%20of%206.2%20days), the average cycle time for top-performing teams is about 2 days, with the median for most teams being about 3.5 days. However, some development teams [report their average cycle times](https://linearb.io/blog/how-to-reduce-cycle-time-in-software-delivery/) as being as much as 7 days. Teams can calculate this by evaluating how long several types of fixes take from start to finish.\n\n## What are some cycle time challenges?\n\nEvery team has processes and steps that increase delivery cycle time. A shorter and faster time to market empowers teams to fulfill customer demands and exceed their expectations. Here are a few of\nthe challenges we’ve dealt with in the recent past.\n\n### Getting it right the first time\n\nWhen developing new features, we want to ensure that things don’t break when they\nreach a user. Because of our monthly release cycle, users could be stuck with\na broken feature until the following month, causing frustration and decreasing\nthe value that GitLab brings to its users. So, it’s important that we test and\nship with certainty. [Marin Jankovski](/company/team/#maxlazio), Engineering Manager of\nthe Distribution & Release Management teams, and [Sean McGivern](/company/team/#mcgivernsa),\nEngineering Manager of the Plan team, note the importance of testing and shipping features.\n\n\n>“Finding a way to test changes faster can be challenging. With the Distribution\nteam, we have the responsibility of ensuring that the release we ship still\nfunctions after we make our changes and that users can still install and use GitLab.”\n– Marin Jankovski\n\n>“Our release process is a big challenge, if you consider that the cycle ends\nonce customers have the feature available to use. We don’t have CD for\nGitLab.com, but even if we did, for self-managed customers, we only have one\nfeature release a month. So, that’s a hard limit.” – Sean McGivern\n\n### Differentiating the helpful from the unhelpful\n\nEvery workflow has components that can slow down release cycles, including code\nreviews, manual configuration and testing, and hand-offs. Some of these elements\nare necessary, like product manager meetings, but other aspects can unintentionally\ncause problems. [Tommy Morgan](/company/team/#itstommymorgan), Director of Engineering of\nDev Backend, highlights the essential measures that teams need to take to promote\ncollaboration and alignment but that may increase cycle times.\n\n\n>“Teams have all these things that are slowing down cycle times, and there could\nbe extra steps or extra involvement that aren’t necessary or beneficial and that\ncould unintentionally add pressure to the team to slow down. One of the biggest\nchallenges is identifying which ones are legitimate and helpful and which ones\nare us giving into the natural urge to add process. Identifying across that fine\nline is where the real challenge comes into play for most teams.”\n\n### Working across teams\n\nCross-collaboration fosters innovative thinking and allows each team to specialize\nin a specific area to maximize contributions. 
While the benefits of working with\nmultiple teams are abundant, depending on another team’s feedback or assistance\nslows down development, especially when there’s a blocker that can only be resolved\nwith the help of one team. [Rachel Nienaber](/company/team/#rachel-nienaber), Engineering\nManager of the Geo team, and Marin agree that working across teams can have a\nsignificant impact on cycle times.\n\n\n>“When other teams implement a new feature that needs some additional work from\nthe Distribution side, getting informed in time is extremely important. We need\nto affect the decision as early as possible, because we have certain limitations\nwhen it comes to distributing GitLab.” – Marin Jankovski\n\n>“One challenge that I see is that there are a lot of dependencies on people\nexternal to the team to ship features. Ordinarily, a quick way to shorten cycle\ntime is to reduce those dependencies, but here at GitLab, that may reduce the\namount of collaboration that happens with each feature. Collaboration is such an\nimportant [value](https://handbook.gitlab.com/handbook/values/#collaboration) that this may have to take\nprecedence in some cases and be more important than the gain in speed.” – Rachel Nienaber\n\n### Asynchronous communication\n\nAt GitLab, we practice [asynchronous communication](/handbook/communication/),\nso we “don’t expect an instantaneous response,” allowing us to focus on our\nindividual workflows. The problem with working asynchronously is that projects\ncan become delayed when team members are in different time zones and\nresponses don’t trickle in until the following day, making rapid progress\nharder to sustain. [Mek Stittri](/company/team/#mekdev),\nEngineering Manager of the Quality team, and Rachel acknowledge the difficulties\nthat can come with asynchronous communication.\n\n>“My team is spread across so many projects and has someone in almost every time\nzone, meaning communication can be challenging.” – Mek Stittri\n\n>“This is my first role with an asynchronous method of working. I am finding that\nmany practices that work in a synchronous team need some adjustment to be useful here.” – Rachel Nienaber\n\n## What are some solutions for reducing cycle times?\n\nAt GitLab, we’re fortunate to have the freedom to experiment and\n[iterate](https://handbook.gitlab.com/handbook/values/#iteration), so we’ve been able to develop a few\nstrategies that alleviate the challenges we face when reducing cycle times to meet customer demands.\n\n### How to get it right the first time\n\n\u003Col start=\"1\">\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Automate work as much as possible.\u003C/b> Using CI to automatically do releases and investing time in automating\n        other manual tasks is crucial for delivery. Manual tasks are both a huge\n        drain on morale and prone to errors. 
It’s much easier to give engineers\n        a bug to fix in an automated tool than to ask them to do the same thing\n        multiple times.\n    \u003C/p>\n    \u003C/li>\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Work with smaller, iterative pieces.\u003C/b> Breaking work into smaller chunks,\n        \u003Ca href=\"/handbook/values/#iteration\">iterating\u003C/a> frequently, and\n        \u003Ca href=\"https://gitlab.com/gl-retrospectives/plan/issues/10\">indicating priority more clearly\u003C/a>\n        within a milestone enables better predictability for what’s going to ship.\n        Planning becomes easier, because individual issues are smaller, so it’s\n        easy to shuffle issues around if something unexpected interrupts other\n        work.\n    \u003C/p>\n    \u003C/li>\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Use feature flags.\u003C/b> Rather than using a giant merge request to make\n        every change for a feature at once, which is harder to review, update,\n        and keep up-to-date with the master branch, consider developing\n        more features behind short-lived \u003Ca href=\"https://docs.gitlab.com/ee/development/feature_flags/index.html\">feature flags\u003C/a>.\n    \u003C/p>\n    \u003C/li>\n\u003C/ol>\n\n### How to differentiate the helpful from the unhelpful\n\n\u003Col start=\"4\">\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Measure the impact of components.\u003C/b> Measuring impact can help determine\n        whether a process either doesn’t help out that much in the end or helps\n        out infrequently. In either case, the net benefit can be small, but the\n        pain it adds (in terms of how much extra time you spend trying to ship)\n        makes the overall impact negative. If you can’t measure impact directly,\n        you have to be willing to experiment. Try things, see how they work, and\n        decide if you should keep them or not. It’s important to remember that\n        experimentation doesn’t mean process creep - the default end state for\n        an experiment should be “let’s never do that again,” unless there’s a\n        strong sense of value in it.\n    \u003C/p>\n    \u003C/li>\n\u003C/ol>\n\n\n### How to successfully work across teams\n\n\u003Col start=\"5\">\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Communicate and automate where possible.\u003C/b> Automating how others get a\n        finished product before releasing it (e.g. create a package on click)\n        and \u003Ca href=\"/handbook/engineering/development/enablement/systems/distribution/#how-to-work-with-distribution\">broadly communicating\u003C/a>\n        how to work with a team can result in better decisions and faster discussions.\n    \u003C/p>\n    \u003C/li>\n\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Develop a training program.\u003C/b> Creating a training program to help engineers\n        from other teams perform reviews can reduce cycle time for those teams\n        that regularly depend on the Database team. 
This strategy has the added\n        benefit of giving the Database team more time to focus on their own work.\n    \u003C/p>\n    \u003C/li>\n\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Use project management tooling.\u003C/b> Consistent \u003Ca href=\"/handbook/engineering/quality/project-management/\">project management tooling\u003C/a>\n        ensures consistent board configuration that behaves the same at every level,\n        meaning that data rolls up to one top level board which contains a\n        snapshot of an entire team, ensuring that prioritization is clear and\n        workload is transparent.\n\n    \u003C/p>\n    \u003C/li>\n\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Spread triaging.\u003C/b> To spread the load of triaging across teams, use \u003Ca href=\"https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=closed&label_name[]=triage-package\">triage-package\u003C/a>.\n        Here is a \u003Ca href=\"https://gitlab.com/gitlab-org/gitlab-ce/issues/52024\">recent example\u003C/a>\n        of how we used triage-package to lessen the burden on one team.\n\n    \u003C/p>\n    \u003C/li>\n\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Have more focused milestones.\u003C/b> Creating focused milestones can reduce\n        context switching, since team members can concentrate on specific aspects\n        of a feature.\n    \u003C/p>\n    \u003C/li>\n\u003C/ol>\n\n### How to make asynchronous communication work\n\n\u003Col start=\"10\">\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Work on multiple items.\u003C/b> Having a list of multiple items to work on\n        during each release cycle helps team members easily transition to another\n        task rather than remaining blocked when waiting for feedback.\n    \u003C/p>\n    \u003C/li>\n\u003C/ol>\n\n## Advice\n\nReducing cycle times to meet internal cycle time goals can be a difficult undertaking, requiring the input from\nproduct managers, engineering leads, and developers. It’s a hard task to\nchallenge long-practiced behaviors, especially when the worst case scenario could\nmean features don’t make a release. Here is some advice to help your team's cycle time reduction effort.\n\n### Be thoughtful and considerate\n\n“At GitLab, we want to iterate quickly, but we also want to keep GitLab.com fast\nand stable. That means that we can’t just decide to ship things faster, we need\nto come up with strategies to mitigate any risks to performance and availability,\nbuild tooling and processes around those strategies. This is often work that can\ngo underappreciated, and it can be hard at times, but it’s vital to ensuring\nthat you can safely shorten cycle times.” – Sean McGivern\n\n### Retrospectives for learning\n\n“A successful team is a happy team. Bringing down production cycle time can help a team be\nmore successful because they are shipping value more often, but your team might\nhave more important things that must be addressed first. Using retrospectives\nwill help you to figure out what success means to your team, and what needs to\nbe done to achieve that success.” – Rachel Nienaber\n\n### Experiment\n\n“Make yourself uncomfortable. It’s unnatural to push for shorter cycle time.\nIt’s natural to add steps - it’s not natural to remove them. Try drastic cuts\nand be willing to learn from an experiment.” – Tommy Morgan\n\n### Spotlight your team\n\n“You can’t make product managers happy, so try to make your team happy instead\nby giving them a chance to shine. 
:P” – Marin Jankovski\n\n",[727,1347],{"slug":6962,"featured":6,"template":678},"strategies-to-reduce-cycle-times","content:en-us:blog:strategies-to-reduce-cycle-times.yml","Strategies To Reduce Cycle Times","en-us/blog/strategies-to-reduce-cycle-times.yml","en-us/blog/strategies-to-reduce-cycle-times",{"_path":6968,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6969,"content":6974,"config":6979,"_id":6981,"_type":16,"title":6982,"_source":17,"_file":6983,"_stem":6984,"_extension":20},"/en-us/blog/gitlab-com-stability-post-gcp-migration",{"title":6970,"description":6971,"ogTitle":6970,"ogDescription":6971,"noIndex":6,"ogImage":6819,"ogUrl":6972,"ogSiteName":692,"ogType":693,"canonicalUrls":6972,"schema":6973},"What's up with GitLab.com? Check out the latest data on its stability","Let's take a look at the data on the stability of GitLab.com from before and after our recent migration from Azure to GCP, and dive into why things are looking up.","https://about.gitlab.com/blog/gitlab-com-stability-post-gcp-migration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What's up with GitLab.com? Check out the latest data on its stability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"}],\n        \"datePublished\": \"2018-10-11\",\n      }",{"title":6970,"description":6971,"authors":6975,"heroImage":6819,"date":6976,"body":6977,"category":14,"tags":6978},[1462],"2018-10-11","\nThis post is inspired by [this comment on Reddit](https://www.reddit.com/r/gitlab/comments/9f71nq/thanks_gitlab_team_for_improving_the_stability_of/),\nthanking us for improving the stability of GitLab.com. Thanks, hardwaresofton! Making GitLab.com\nready for your mission-critical workloads has been top of mind for us for some time, and it's\ngreat to hear that users are noticing a difference.\n\n_Please note that the numbers in this post differ slightly from the Reddit post as the data has changed since that post._\n\nWe will continue to work hard on improving the availability and stability of the platform. 
Our\ncurrent goal is to achieve 99.95 percent availability on GitLab.com – look out for an upcoming\npost about how we're planning to get there.\n\n## GitLab.com stability before and after the migration\n\nAccording to [Pingdom](http://stats.pingdom.com/81vpf8jyr1h9), GitLab.com's availability for the year to date, up until the migration was **[99.68 percent](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=527563485&range=F2)**, which equates to about 32 minutes of downtime per week on average.\n\nSince the migration, our availability has improved greatly, although we have much less data to compare with than in Azure.\n\n![Availability Chart](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=458170195&format=image)\n\nUsing data publicly available from Pingdom, here are some stats about our availability for the year to date:\n\n| Period                                 | Mean-time between outage events |\n| -------------------------------------- | ------------------------------- |\n| Pre-migration (Azure)                  | **1.3 days**                    |\n| Post-migration (GCP)                   | **7.3 days**                    |\n| Post-migration (GCP) excluding 1st day | **12 days**                     |\n\nThis is great news: we're experiencing outages less frequently. What does this mean for our availability, and are we on track to achieve our goal of 99.95 percent?\n\n| Period                    | Availability                                                                                                                   | Downtime per week |\n| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------- |\n| Pre-migration (Azure)     | **[99.68%](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=527563485&range=F2)**  | **32 minutes**    |\n| Post-migration (GCP)      | **[99.88 %](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=527563485&range=B3)** | **13 minutes**    |\n| Target – not yet achieved | **99.95%**                                                                                                                     | **5 minutes**     |\n\nDropping from 32 minutes per week average downtime to 13 minutes per week means we've experienced a **61 percent improvement** in our availability following our migration to Google Cloud Platform.\n\n## Performance\n\nWhat about the performance of GitLab.com since the migration?\n\nPerformance can be tricky to measure. In particular, averages are a terrible way of measuring performance, since they neglect outlying values. One of the better ways to measure performance is with a latency histogram chart. To do this, we imported the GitLab.com access logs for July (for Azure) and September (for Google Cloud Platform) into [Google BigQuery](https://cloud.google.com/bigquery/), then selected the 100 most popular endpoints for each month and categorised these as either API, web, git, long-polling, or static endpoints. 
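To give a flavour of the kind of query involved, here is a minimal sketch using the `bq` CLI; the dataset, table, and column names are invented for illustration and do not reflect our actual schema:\n\n```sh\n# Hypothetical sketch: bucket request durations into a latency histogram.\nbq query --use_legacy_sql=false '\nSELECT\n  CAST(FLOOR(LOG10(duration_ms + 1) * 4) AS INT64) AS log_bucket,\n  COUNT(*) AS requests\nFROM `logs.access_201809`\nGROUP BY log_bucket\nORDER BY log_bucket'\n```\n\n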
Comparing these histograms side-by-side allows us to study how the performance of GitLab.com has changed since the migration.\n\n![GitLab.com Latency Histogram](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/azure_v_gcp_latencies.gif)\n\nIn this histogram, higher values on the left indicate better performance. The right of the graph is the \"_tail_\", and the \"_fatter the tail_\", the worse the user experience.\n\nThis graph shows us that with the move to GCP, more requests are completing within a satisfactory amount of time.\n\nHere are two more graphs showing the difference for API and Git requests respectively.\n\n![API Latency Histogram](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/api-performance-histogram.png)\n\n![Git Latency Histogram](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/git-performance-histogram.png)\n\n## Why these improvements?\n\nWe chose Google Cloud Platform because we believe that Google offers the most reliable cloud platform for our workload, particularly as we move towards running GitLab.com in [Kubernetes](/solutions/kubernetes/).\n\nHowever, there are many other reasons unrelated to our change in cloud provider for these improvements to stability and performance.\n\n> #### _“We chose Google Cloud Platform because we believe that Google offers the most reliable cloud platform for our workload”_\n\nLike any large SaaS site, GitLab.com is a large, complicated system, and attributing availability improvements to individual changes is extremely difficult, but here are a few factors which may be affecting our availability and performance:\n\n### Reason #1: Our Gitaly Fleet on GCP is much more powerful than before\n\nGitaly is responsible for all Git access in the GitLab application. Before Gitaly, Git access occurred directly from within Rails workers. Because of the scale we run at, we require many servers serving the web application, and therefore, in order to share Git data between all workers, we relied on NFS volumes. Unfortunately this approach doesn't scale well, which led to us building Gitaly, a dedicated Git service.\n\n> #### _“We've opted to give our fleet of 24 Gitaly servers a serious upgrade”_\n\n#### Our upgraded Gitaly fleet\n\nAs part of the migration, we've opted to give our fleet of 24 [Gitaly](/blog/the-road-to-gitaly-1-0/) servers a serious upgrade. If the old fleet was the equivalent of a nice family sedan, the new fleet is like a pack of snarling muscle cars, ready to serve your Git objects.\n\n| Environment | Processor                       | Number of cores per instance | RAM per instance |\n| ----------- | ------------------------------- | ---------------------------- | ---------------- |\n| Azure       | Intel Xeon Ivy Bridge @ 2.40GHz | 8                            | 55GB             |\n| GCP         | Intel Xeon Haswell @ 2.30GHz    | **32**                       | **118GB**        |\n\nOur new Gitaly fleet is much more powerful. This means that Gitaly can respond to requests more quickly, and deal better with unexpected traffic surges.\n\n#### IO performance\n\nAs you can probably imagine, serving [225TB of Git data](https://dashboards.gitlab.com/d/ZwfWfY2iz/vanity-metrics-dashboard?orgId=1) to roughly half-a-million active users a week is a fairly IO-heavy operation. 
Any performance improvements we can make to this will have a big impact on the overall performance of GitLab.com.\n\nFor this reason, we've focused on improving performance here too.\n\n| Environment | RAID         | Volumes | Media    | filesystem | Performance                                                            |\n| ----------- | ------------ | ------- | -------- | ---------- | ---------------------------------------------------------------------- |\n| Azure       | RAID 5 (lvm) | 16      | magnetic | xfs        | 5k IOPS, 200MB/s (_per disk_) / 32k IOPS **1280MB/s** (_volume group_) |\n| GCP         | No raid      | 1       | **SSD**  | ext4       | **60k read IOPs**, 30k write IOPs, 800MB/s read 200MB/s write          |\n\nHow does this translate into real-world performance? Here are average read and write times across our Gitaly fleet:\n\n##### IO performance is much higher\n\nHere are some comparative figures for our Gitaly fleet from Azure and GCP. In each case, the performance in GCP is much better than in Azure, although this is what we would expect given the more powerful fleet.\n\n[![Disk read time graph](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=458168633&format=image)](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=1002437172) [![Disk write time graph](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=884528549&format=image)](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=1002437172) [![Disk Queue length graph](https://docs.google.com/spreadsheets/d/e/2PACX-1vQg_tdtdZYoC870W3u2R2icSK0Rd9qoOtDJqYHALaQlzhxXOmfY63X1NMMyFVEypQs7NngR4UUIZx5R/pubchart?oid=2135164979&format=image)](https://docs.google.com/spreadsheets/d/1uJ_zacNvJTsvJUfNpi1D_aPBg-vNJC1xJzsSwGKKt8g/edit#gid=1002437172)\n\nNote: For reference: for Azure, this uses the average times for the week leading up to the failover. For GCP, it's an average for the week up to October 2, 2018.\n\nThese stats clearly illustrate that our new fleet has far better IO performance than our old cluster. Gitaly performance is highly dependent on IO performance, so this is great news and goes a long way to explaining the performance improvements we're seeing.\n\n### Reason #2: Fewer \"unicorn worker saturation\" errors\n\n![HTTP 503 Status GitLab](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/facepalm-503.png)\n\nUnicorn worker saturation sounds like it'd be a good thing, but it's really not!\n\nWe ([currently](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/1899)) rely on [unicorn](https://bogomips.org/unicorn/), a Ruby/Rack http server, for serving much of the application. Unicorn uses a single-threaded model, which uses a fixed pool of workers processes. Each worker can handle only one request at a time. 
If the worker gives no response within 60 seconds, it is terminated and another process is spawned to replace it.\n\n> #### _“Unicorn worker saturation sounds like it'd be a good thing, but it's really not!”_\n\nAdd to this the lack of autoscaling technologies to ramp the fleet up when we experience high load volumes, and this means that GitLab.com has a relatively static-sized pool of workers to handle incoming requests.\n\nIf a Gitaly server experiences load problems, even fast [RPCs](https://en.wikipedia.org/wiki/Remote_procedure_call) that would normally only take milliseconds, could take up to several seconds to respond – thousands of times slower than usual. Requests to the unicorn fleet that communicate with the slow server will take hundreds of times longer than expected. Eventually, most of the fleet is handling requests to that affected backend server. This leads to a queue which affects all incoming traffic, a bit like a tailback on a busy highway caused by a traffic jam on a single offramp.\n\nIf the request gets queued for too long – after about 60 seconds – the request will be cancelled, leading to a 503 error. This is indiscriminate – all requests, whether they interact with the affected server or not, will get cancelled. This is what I call unicorn worker saturation, and it's a very bad thing.\n\nBetween February and August this year we frequently experienced this phenomenon.\n\nThere are several approaches we've taken to dealing with this:\n\n- **Fail fast with aggressive timeouts and circuitbreakers**: Timeouts mean that when a Gitaly request is expected to take a few milliseconds, they time out after a second, rather than waiting for the request to time out after 60 seconds. While some requests will still be affected, the cluster will remain generally healthy. Gitaly currently doesn't use circuitbreakers, but we plan to add this, possibly using [Istio](https://istio.io/docs/tasks/traffic-management/circuit-breaking/) once we've moved to Kubernetes.\n\n- **Better abuse detection and limits**: More often than not, server load spikes are driven by users going against our fair usage policies. We built tools to better detect this and over the past few months, an abuse team has been established to deal with this. Sometimes, load is driven through huge repositories, and we're working on reinstating fair-usage limits which prevent 100GB Git repositories from affecting our entire fleet.\n\n- **Concurrency controls and rate limits**: For limiting the blast radius, rate limiters (mostly in HAProxy) and concurrency limiters (in Gitaly) slow overzealous users down to protect the fleet as a whole.\n\n### Reason #3: GitLab.com no longer uses NFS for any Git access\n\nIn early September we disabled Git NFS mounts across our worker fleet. This was possible because Gitaly had reached v1.0: the point at which it's sufficiently complete. You can read more about how we got to this stage in our [Road to Gitaly blog post](/blog/the-road-to-gitaly-1-0/).\n\n### Reason #4: Migration as a chance to reduce debt\n\nThe migration was a fantastic opportunity for us to improve our infrastructure, simplify some components, and otherwise make GitLab.com more stable and more observable, for example, we've rolled out new **structured logging infrastructure**.\n\nAs part of the migration, we took the opportunity to move much of our logging across to structured logs. 
We use [fluentd](https://www.fluentd.org/), [Google Pub/Sub](https://cloud.google.com/pubsub/docs/overview), and [Pubsubbeat](https://github.com/GoogleCloudPlatform/pubsubbeat), storing our logs in [Elastic Cloud](https://www.elastic.co/cloud) and [Google Stackdriver Logging](https://cloud.google.com/logging/). Having reliable, indexed logs has allowed us to reduce our mean time to detection of incidents, and in particular to detect abuse. This new logging infrastructure has also been invaluable in detecting and resolving several security incidents.\n\n> #### _“This new logging infrastructure has also been invaluable in detecting and resolving several security incidents”_\n\nWe've also focused on making our staging environment much more similar to our production environment. This allows us to test more changes, more accurately, in staging before rolling them out to production. Previously, the team was maintaining\na limited, scaled-down staging environment, and many changes were not adequately tested before being rolled out. Our environments now share a common configuration and we're working to automate all [terraform](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5079) and [chef](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5078) rollouts.\n\n### Reason #5: Process changes\n\nUnfortunately many of the worst outages we've experienced over the past few years have been self-inflicted. We've always been transparent about these — and will continue to be so — but as we rapidly grow, it's important that our processes scale alongside our systems and team.\n\n> #### _“It's important that our processes scale alongside our systems and team”_\n\nIn order to address this, over the past few months, we've formalized our change and incident management processes. These processes respectively help us to avoid outages and to resolve them more quickly when they do occur.\n\nIf you're interested in finding out more about the approach we've taken to these two vital disciplines, they're published in our handbook:\n\n- [GitLab.com's Change Management Process](/handbook/engineering/infrastructure/change-management/)\n- [GitLab.com's Incident Management Process](/handbook/engineering/infrastructure/incident-management/)\n\n### Reason #6: Application improvement\n\nEvery GitLab release includes [performance and stability improvements](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&state=opened&label_name%5B%5D=performance); some of these have had a big impact on GitLab's stability and performance, particularly fixes for n+1 issues.\n\nTake Gitaly for example: like other distributed systems, Gitaly can suffer from a class of performance degradations known as \"n+1\" problems. This happens when an endpoint needs to make many queries (_\"n\"_) to fulfill a single request.\n\n> Consider an imaginary endpoint which queried Gitaly for all tags on a repository, and then issued an additional query for each tag to obtain more information. This would result in n + 1 Gitaly queries: one for the initial list of tags, and then n more, one per tag. This approach would work fine for a project with 10 tags – issuing 11 requests – but for a project with 1000 tags it would result in 1001 Gitaly calls, each with a round-trip time, issued in sequence.\n\n![Latency drop in Gitaly endpoints](https://about.gitlab.com/images/blogimages/whats-up-with-gitlab-com/drop-off.png)\n\nUsing data from Pingdom, this chart shows long-term performance trends since the start of the year. 
It's clear that latency improved a great deal on May 7, 2018. This date happens to coincide with the RC1 release of GitLab 10.8, and its deployment on GitLab.com.\n\nIt turns out that this was due to [a single n+1 issue on the merge request page being resolved](https://gitlab.com/gitlab-org/gitlab-ce/issues/44052).\n\nWhen running in development or test mode, GitLab now detects n+1 situations, and we have compiled [a list of known n+1s](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=performance&label_name[]=Gitaly&label_name[]=technical%20debt). As these are resolved we expect even more performance improvements.\n\n![GitLab Summit - South Africa - 2018](https://about.gitlab.com/images/summits/2018_south-africa_team.jpg)\n\n### Reason #7: Infrastructure team growth and reorganization\n\nAt the start of May 2018, the Infrastructure team responsible for GitLab.com consisted of five engineers.\n\nSince then, we've had a new director join the Infrastructure team, two new managers, a specialist [Postgres DBRE](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/13778), and four new [SREs](https://handbook.gitlab.com/job-families/engineering/infrastructure/site-reliability-engineer/). The database team has been reorganized to be an embedded part of the infrastructure group. We've also brought in [Ongres](https://www.ongres.com/), a specialist Postgres consultancy, to work alongside the team.\n\nHaving enough people on the team has allowed us to split time between on-call, tactical improvements, and longer-term strategic work.\n\nOh, and we're still hiring! If you're interested, check out [our open positions](/jobs/) and choose the Infrastructure Team 😀\n\n## TL;DR: Conclusion\n\n1. GitLab.com is more stable: availability has improved 61 percent since we migrated to GCP\n1. GitLab.com is faster: latency has improved since the migration\n1. 
We are totally focused on continuing these improvements, and we're building a great team to do it\n\nOne last thing: our Grafana dashboards are open, so if you're interested in digging into our metrics in more detail, visit [dashboards.gitlab.com](https://dashboards.gitlab.com) and explore!\n",[1204,728,915,1002,676,704],{"slug":6980,"featured":6,"template":678},"gitlab-com-stability-post-gcp-migration","content:en-us:blog:gitlab-com-stability-post-gcp-migration.yml","Gitlab Com Stability Post Gcp Migration","en-us/blog/gitlab-com-stability-post-gcp-migration.yml","en-us/blog/gitlab-com-stability-post-gcp-migration",{"_path":6986,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":6987,"content":6993,"config":6999,"_id":7001,"_type":16,"title":7002,"_source":17,"_file":7003,"_stem":7004,"_extension":20},"/en-us/blog/meltano-functional-group-update-post",{"title":6988,"description":6989,"ogTitle":6988,"ogDescription":6989,"noIndex":6,"ogImage":6990,"ogUrl":6991,"ogSiteName":692,"ogType":693,"canonicalUrls":6991,"schema":6992},"New Meltano personas, priorities, and updates from the team","There's a lot going on — here are some of the highlights on user research, dogfooding Meltano, embedding engineers, and hiring!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678847/Blog/Hero%20Images/meltano-fgu.jpg","https://about.gitlab.com/blog/meltano-functional-group-update-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New Meltano personas, priorities, and updates from the team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2018-10-08\",\n      }",{"title":6988,"description":6989,"authors":6994,"heroImage":6990,"date":6996,"body":6997,"category":14,"tags":6998},[6995],"Jacob Schatz","2018-10-08","\nJacob Schatz here, Staff Engineer for [Meltano](https://gitlab.com/meltano)! We've been heads down working on improving Meltano, and figured it was time for an update. We've had some great conversations that have helped us identify two general personas. Our team is also growing, and we're ready for frontend contributions, but more on that later.\n\nWe've been conducting interviews to zero in on what our users will want, what they're currently doing, and what tools they're using. Over the course of those conversations, we saw two main scenarios emerge. People either wanted a command line interface (CLI) or a graphical user interface (GUI). The GUIs that exist are painful to use, and not very intuitive. In both scenarios, people we spoke with are frustrated. This goes back to the original reason [we decided to create Meltano](/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you/) — our data team members were relying on frustrating and expensive toolsets with poor UIs.\n\n### What are the Meltano personas?\n\nOur conversations revealed two general types of users:\n* Users who have engineers on staff\n* Users who do not have engineers on staff, or their engineers do not have bandwidth to help them\n\nThe Data team at GitLab, for example, has data engineers on staff who are willing, able, and happy to write Python. We won't be able to write every extractor and loader, so our users can follow our [specifications](https://gitlab.com/meltano/specifications), which are based off of the [Singer specifications](https://github.com/singer-io/getting-started). 
We want to make that as easy as possible, so Meltano can be the glue between all these different pieces.\n\nFor the other teams who don’t have the technical resources, we want to make it as if they had engineers on staff. Ideally, they'll just need to click a couple of buttons and run extract, load, and transform with the extractors and loaders that we already have. Hopefully in the future the community can contribute more of these different types of extractors and loaders.\n\nYou can check out our updated [readme](https://gitlab.com/meltano/meltano/blob/master/README.md) with more info about Meltano and our personas. We're working iteratively, so if you have a different setup or scenario to share, we want to hear from you about your experience! Get in touch with us and tell us about your struggles or successes with your data team.\n\n### What’s next?\n\nWe're focused on our own CLI and GUI, and continuing to build more extractors and loaders (or [\"taps and targets\"](https://www.singer.io/)). We will be the glue that ties everything together. While current Singer taps and targets support extracting and loading, we'll be supporting much more, like removal of PII. Our CLI will support all of this from one configuration. We also want the CLI to have a really nice user experience, so I'm working with GitLab UX to help make it happen.\n\nAs always, we’re looking for contributors! In the [Dashboard project](https://gitlab.com/meltano/dashboard) you’ll see the Chart.js library that I’m building to make really nice dashboards for Meltano. Although we've had a ton of great Python contributions, we haven’t had as many contributors to the frontend, so we’d love your help there.\n\n### In other news\nThere's a lot going on, so here are some of the highlights!\n\n#### Dogfooding\nIn my experience, unless one experiences the direct results of the code they write, and feels the pain their users feel when they hit a bug, one might not correctly solve the problem. Currently, we fulfill the data team's requests, but if something doesn't work they merely report back to us, without us experiencing the pain ourselves. We're changing how we work in order to imprint the idea that if something is broken, it's the Meltano team's responsibility. We’re all investigating every single pipeline failure, regardless of whose “fault” it is, because these failures suggest a poor user experience.\n\n#### Embedded engineers\nIn order to dogfood better, we've taken a data engineer from the data team, and an engineer from the Meltano team. They split their work 50/50, so each does half of their usual work and half of the other's work. It's already made a huge difference by giving us more eyes and ears on lots of issues, and allowing the engineers to approach problems from a different angle. 
Another benefit is that every Meltano engineer gets direct exposure to and experience with the data team, making them better data scientists as well as product engineers.\n\nThat's all for now! Get in touch with us in our [issue tracker](https://gitlab.com/groups/meltano/-/boards), and tweet us [@meltanodata](https://twitter.com/meltanodata)!\n\nCover [image](https://unsplash.com/photos/2FPjlAyMQTA) by [John Schnobrich](https://unsplash.com/@johnschno) on Unsplash\n{: .note}\n\n[Emily von Hoffmann](https://about.gitlab.com/company/team/#emvonhoffmann) contributed to this post.\n{: .note}\n",[2932,749,702,915,727],{"slug":7000,"featured":6,"template":678},"meltano-functional-group-update-post","content:en-us:blog:meltano-functional-group-update-post.yml","Meltano Functional Group Update Post","en-us/blog/meltano-functional-group-update-post.yml","en-us/blog/meltano-functional-group-update-post",{"_path":7006,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7007,"content":7013,"config":7019,"_id":7021,"_type":16,"title":7022,"_source":17,"_file":7023,"_stem":7024,"_extension":20},"/en-us/blog/working-on-two-git-branches-at-the-same-time",{"title":7008,"description":7009,"ogTitle":7008,"ogDescription":7009,"noIndex":6,"ogImage":7010,"ogUrl":7011,"ogSiteName":692,"ogType":693,"canonicalUrls":7011,"schema":7012},"How to work on two Git branches at the same time","Watch the demo on how using the GitLab Web IDE and your local dev environment to work on two branches at once can help save time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678782/Blog/Hero%20Images/working-on-two-git-branches-at-the-same-time.jpg","https://about.gitlab.com/blog/working-on-two-git-branches-at-the-same-time","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to work on two Git branches at the same time\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2018-10-03\",\n      }",{"title":7008,"description":7009,"authors":7014,"heroImage":7010,"date":7016,"body":7017,"category":14,"tags":7018},[7015],"William Chia","2018-10-03","\nI was recently using both my local development environment and the GitLab [Web IDE](/blog/introducing-gitlab-s-integrated-development-environment/), and found a really nice workflow for working with two Git branches simultaneously.\n\n### The problem\n\nIn this scenario, you’re doing development work on one branch, in one part of your codebase, and then likely documenting your process in another place. I really don’t want all of this in one merge request, because I don’t want to delay shipping the development work if [the docs](https://docs.gitlab.com) aren’t done. I want to be able to get it live so that others can see it, give feedback on each individual component, and iterate on it. At the same time, I don’t want to delay too long on documenting the process, because I want the docs to be as accurate and reproducible as possible.\n\n### The fix\n\nWhile doing my development work in my local development environment, I created another merge request for the documentation using the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/), essentially working on two different Git branches at the same time, using two different editors.\n\nIn my quick example below, you can see a merge request to add Jenkins content to our [DevOps tools](/competition/) page. I’ve checked out this branch locally, and I have it open in my Atom editor. 
I’ve been doing some work updating `features.yml`, as well as a Markdown file and a Haml file. All of these changes are related to one merge request. While I’m committing changes locally to the comparison page, I’m documenting each step in my Web IDE in a separate tab, to make sure my instructions are precise, helpful, and completed in real time.\n\n### Watch the demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uV3ycYnwhBc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can see what we've got planned for the Web IDE in 2019 in our post about [our product vision for DevOps Create](/blog/create-vision/).\n\nWhat are other ways the Web IDE has come in handy for you? Let us know by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\nCover [photo](https://unsplash.com/photos/3y1zF4hIPCg) by [Hans-Peter Gauster](https://unsplash.com/photos/3y1zF4hIPCg) on Unsplash\n{: .note}\n",[2932,749,702,915,727],{"slug":7020,"featured":6,"template":678},"working-on-two-git-branches-at-the-same-time","content:en-us:blog:working-on-two-git-branches-at-the-same-time.yml","Working On Two Git Branches At The Same Time","en-us/blog/working-on-two-git-branches-at-the-same-time.yml","en-us/blog/working-on-two-git-branches-at-the-same-time",{"_path":7026,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7027,"content":7033,"config":7039,"_id":7041,"_type":16,"title":7042,"_source":17,"_file":7043,"_stem":7044,"_extension":20},"/en-us/blog/why-all-organizations-need-prometheus",{"title":7028,"description":7029,"ogTitle":7028,"ogDescription":7029,"noIndex":6,"ogImage":7030,"ogUrl":7031,"ogSiteName":692,"ogType":693,"canonicalUrls":7031,"schema":7032},"Why Prometheus is for everyone","You think you don't need Prometheus – I'm here to tell you why you're wrong. Learn why GitLab uses Prometheus, and why your organization should be using it too!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678778/Blog/Hero%20Images/monitoring-cover.png","https://about.gitlab.com/blog/why-all-organizations-need-prometheus","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why Prometheus is for everyone\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Matos\"}],\n        \"datePublished\": \"2018-09-27\",\n      }",{"title":7028,"description":7029,"authors":7034,"heroImage":7030,"date":7036,"body":7037,"category":14,"tags":7038},[7035],"Lee Matos","2018-09-27","\nIt's no secret that here at GitLab, we hitched our wagon to [Prometheus](https://docs.gitlab.com/ee/administration/monitoring/prometheus/index.html#doc-nav) long ago. We've been\n[shipping it with GitLab since 8.16](/releases/2017/01/22/gitlab-8-16-released/). Having said that,\neven within GitLab we weren't all using Prometheus. The Support Engineering team was\nvery much in the camp of \"We don't need this to troubleshoot customer problems.\" We were wrong;\nwe needed Prometheus all along, and here's why your organization should be using it too.\n\n## What is Prometheus?\n\nFor a short answer, Prometheus is software that stores event data in real time. But more specifically…\n\nPrometheus is a powerful, free, open-source monitoring service that records real-time metrics and provides real-time alerts. It’s built with an HTTP pull model: Prometheus collects performance metrics, which you can view through an external dashboard tool (such as Grafana) or by connecting to Prometheus directly.\n\n
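To see what the pull model looks like from an application's point of view, here is a tiny Go program using the official [Prometheus Go client](https://github.com/prometheus/client_golang) to expose a `/metrics` endpoint for a Prometheus server to scrape (the counter name is just an example):\n\n```go\npackage main\n\nimport (\n    \"net/http\"\n\n    \"github.com/prometheus/client_golang/prometheus\"\n    \"github.com/prometheus/client_golang/prometheus/promauto\"\n    \"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\n// A counter the application increments; Prometheus pulls its current\n// value every time it scrapes /metrics.\nvar requestsTotal = promauto.NewCounter(prometheus.CounterOpts{\n    Name: \"myapp_requests_total\",\n    Help: \"Total number of HTTP requests handled.\",\n})\n\nfunc main() {\n    http.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n        requestsTotal.Inc()\n        w.Write([]byte(\"hello\"))\n    })\n    http.Handle(\"/metrics\", promhttp.Handler()) // scraped by Prometheus\n    http.ListenAndServe(\":8080\", nil)\n}\n```\n\n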
SoundCloud originally developed Prometheus, but nowadays it is maintained by the Cloud Native Computing Foundation (CNCF). The cloud-native architecture of Prometheus has made it extremely popular as part of a modern technology stack.\n\n## Prometheus is great, so why isn't everyone using it already?\n\nI think GitLab customers fall into a few categories: You have the customer who wants to use GitLab\nbut can't or doesn't want to manage servers. They'll use [GitLab.com](/pricing/)! By making that choice they can\nleverage the hard work of our Production team and reap the benefits of what Prometheus has to offer.\n\nThen you have the customer who is [running their own simple GitLab deployment](/pricing/#self-managed), but they may\nnot know or appreciate the value of Prometheus metrics. The Support Engineering team was\nlike this too! We thought, \"We can use traditional tools. Just knowing where the logging is,\nknowing the system, is enough to actually solve the problems that we see. Just having\nexperience is enough.\" Not so.\n\nThen you have large, enterprise customers who are deploying GitLab clusters with dozens of\nservers and a lot of moving parts. For them, Prometheus really shines because the complexity\nballoons, and once you move GitLab from a single server to three, or four, or 20, being able\nto see all of the metrics in one view makes a huge difference in the time it takes to resolve critical infrastructure issues.\n\n## How we saw the light about Prometheus\n\nA large GitLab customer was experiencing a really strange, catastrophic failure scenario, and\nthe problem was proving elusive to the support team. Even after days of troubleshooting we couldn't\nfind what we were looking for, so we called in [Jacob](/company/team/#jacobvosmaer) from our\n[Gitaly](/blog/the-road-to-gitaly-1-0/) team because it looked like Gitaly was at the\ncore of the problem. We had been using Gitaly on GitLab.com for about six months at that point\nand he had never seen it behave this way before. It looked like Gitaly was accessing Git data,\nbut just _extremely slowly_, and it would spread across the cluster one server at a time. Jacob\nand I speculated and made some Gitaly dashboards, and while that was a good moment of cross-team\ncollaboration, he was stumped.\n\nMost of the time when we're debugging GitLab, it's easy to pinpoint the root of the problem.\nBut in this case, it was a catastrophic failure across the entire cluster that was a ticking time bomb.\nWhen we'd see the indicators we'd effectively have 15-35 minutes before the entire fleet was down.\nThis customer actually had Prometheus on their roadmap but hadn't deployed it yet, so when\nthe failure happened it was top of our list of things to deploy:\n\n**Support**: We should focus on trying to understand why this host is affected.\n\n**Production**: If we get better observability with Prometheus we'll move faster.\n\n**Support**: I'm worried this is a distraction! We don't have much time.\n\n**Production**: Watch and learn. Watch and learn.\n\n_(Cue dramatic montage of hackers with GitLab stickers on their laptops feverishly typing under duress)_\n\nOnce Prometheus was in place, we called in the Production team. They run one of the largest\nGitLab instances: GitLab.com. 
We exported their dashboard and gave it to the customer, so\nwithin minutes they had a GitLab production-scale dashboard with all of the things that\nour production engineers use. Now, we could leverage the wealth of knowledge of our Prometheus\nexperts, as it's a familiar interface and they know exactly what they're looking at.\n\nWith that as a starting point, we began querying and slicing the data and building dashboards, which gave\nus a couple of different facets through which to view the data and come to some conclusions.\n\"Okay, it looks like once a host becomes 'tainted,' all Git-level operations spike and _HALT_.\nNow we can finally ask the question, why?\" And then, when we asked that, we saw that it was\na problem with Amazon's EFS file system. We had hit some upper boundary of EFS access and,\nhaving identified it, we were able to fix it by moving those specific files out of EFS. After we\nmade that change it was easy to use Prometheus and Grafana to verify that the state was sound\nand everything was working as expected afterwards, without even lifting a finger. We just looked\nat the dashboard in place. So while the customer had intended to deploy Prometheus later this\nyear, in this emergency situation it definitely saved the day, and it is now a huge part\nof keeping their GitLab infrastructure healthy. Without it we wouldn't be nearly as confident\nor comfortable in our solution.\n\n## Prometheus has opened up a whole world of possibilities\n\nWe have another large client that's on an older version of GitLab without Prometheus. We're\nworking to debug things there and while we're able to do it, it's slower going. It requires a lot\nmore manual effort to coalesce the data and get it in a form we can use. It often takes about\n35-40 minutes to get the data, slicing it with grep, AWK, and friends, with at least one man page\nconsulted for syntax. With Prometheus and Grafana, we'd be able to just access, view,\nand query the data within minutes. We already have a lot of [built-in monitoring capabilities](https://docs.gitlab.com/ee/administration/monitoring/). GitLab is a complex\nsystem built of various open source sub-systems, and we're monitoring all of them with Prometheus.\nYou can too.\n\n### Everyone should be using our GitLab.com dashboard\n\nAs I said earlier, in our intense, catastrophic scenario we gave the customer our GitLab.com\ndashboard. Any customer can use this dashboard as a template! You literally can go to [dashboards.gitlab.com](https://dashboards.gitlab.com), click \"export\" to get the dashboard, then go to your own instance and click \"import.\" It will show up, and\nyou just need to tweak the name so that it's not defaulting to our GitLab Production cluster.\nThen Prometheus just fills in the data.\n\n\u003Ciframe src=\"https://giphy.com/embed/12NUbkX6p4xOO4\" width=\"480\" height=\"440\" frameBorder=\"0\" class=\"giphy-embed\" allowFullScreen>\u003C/iframe>\n\nWe're trying to standardize around using the dashboards here, so that while there are differences\nand nuances in the deployments etc., we're speaking a common language, and have a common\nmeeting point for GitLab engineers across teams to monitor and talk GitLab performance.\n\n## Are you convinced about Prometheus yet?\n\nWe're now actively training our support team on Prometheus. 
It's likely that other organizations\nhave the same thing happening – where one group could be impacting or helping another,\nbut they're not collaborating, so they can't see where or how they can help one another. We've\nseen the light! It's something that we want\nto make sure that everybody can make use of.\n\nMany customers think they don't need Prometheus and are reluctant to use it because it adds\noverhead; you have to configure it and set it up, and it may require a bit of finessing. GitLab\nis trying to make that even easier, but right now when you're building a bespoke deployment,\nit requires a bit of time, and you may not think the time invested is worth it. And I'm here to say:\nit is, so get it now! In fact, it's already there. You just need to turn it on! I'm advocating that all\nlarge customer deployments (over 500 users) have Prometheus running by 2019. Turn it on and\nthen we'll all reap the rewards.\n",[749,915,704],{"slug":7040,"featured":6,"template":678},"why-all-organizations-need-prometheus","content:en-us:blog:why-all-organizations-need-prometheus.yml","Why All Organizations Need Prometheus","en-us/blog/why-all-organizations-need-prometheus.yml","en-us/blog/why-all-organizations-need-prometheus",{"_path":7046,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7047,"content":7053,"config":7059,"_id":7061,"_type":16,"title":7062,"_source":17,"_file":7063,"_stem":7064,"_extension":20},"/en-us/blog/how-we-built-gitlab-geo",{"title":7048,"description":7049,"ogTitle":7048,"ogDescription":7049,"noIndex":6,"ogImage":7050,"ogUrl":7051,"ogSiteName":692,"ogType":693,"canonicalUrls":7051,"schema":7052},"How we built GitLab Geo","Take a deep dive into the many architectural decisions we made while building GitLab Geo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678985/Blog/Hero%20Images/how-we-built-geo-cover.jpg","https://about.gitlab.com/blog/how-we-built-gitlab-geo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we built GitLab Geo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Mazetto\"}],\n        \"datePublished\": \"2018-09-14\",\n      }",{"title":7048,"description":7049,"authors":7054,"heroImage":7050,"date":7056,"body":7057,"category":14,"tags":7058},[7055],"Gabriel Mazetto","2018-09-14","\n[Geo](https://docs.gitlab.com/ee/administration/geo/index.html), our solution for read-only mirrors of your GitLab instance, started with our co-founder [Dmitriy Zaporozhets](/company/team/#dzaporozhets)’ crazy idea of making not only the repositories, but the entire GitLab instance accessible from multiple geographical locations.\n\nAt that time (Q4 of 2015) there were only a few competitors trying to provide an *automatic mirroring* solution for repositories and/or issue trackers, and they were mostly built around an additional independent instance and a bunch of webhooks to replicate events. Also, in those cases, no other data was shared outside this asynchronous replication channel, and you had to set up the webhook per project and take care of the users yourself. 
Long story short: this was not practical for any instance with more than a couple of projects.\n\nWe also had prior experience from early that year [using DRBD to migrate 9 TB of data](/blog/moving-all-your-data/) from our dedicated co-location hosting to the AWS cloud,\nwhich didn't provide the scale, performance, or the UX we had in mind for the future.\n\nHere's the history of how we built Geo:\n\n## Phase 1: MVP\n\nGeo's first mission was to provide people who were located in satellite offices, or in distant locations, with fast access to the tools they need to get work done. The plan was not only to make it faster for Git clones to occur in remote offices but also to provide a fully functional read-only version of GitLab: all project issues, Git repositories, Wikis, etc. automatically synchronized from the primary with as little delay as possible.\n\nTo get there we made a few architectural decisions:\n\n#### 1. Use native database replication\n\nThis would allow us to replicate any user-visible information: user content, users and permissions, projects, any project relation to groups/namespaces, etc. Basically, any data ever written to the database on the primary node is made readily available to the others, without any of the extra communication overhead of webhooks.\n\nIt is also the most [Boring Solution](https://handbook.gitlab.com/handbook/values/#efficiency), as it uses proven technologies developed for databases over the past two decades. To simplify the endeavor we decided to support only PostgreSQL.\n\n#### 2. Use API calls to notify any secondary node of changes that should happen on disk\n\nThis is the second synchronization mechanism. If a new project is created or a repository updated, this notification lets any other node know it has a pending action, and should replicate the new data on disk.\n\n#### 3. Use Git itself to replicate the repositories\n\nWe investigated many alternatives to replicate our repositories, from using basic UNIX tools (like `rsync` or equivalent) to specific distributed file-system features. We were aiming for a simple solution, as ideally we had to support the lowest common denominator: a Linux machine running the default filesystem (ext3 or ext4). That limitation ruled out any distributed file-system based implementation.\n\nWe considered `rsync` and its variants as well, which could potentially work for our use case, but that would add significant CPU cost for each synchronization operation, and we expected it to increase as the repositories get bigger and bigger.\n\nBy using `rsync` we would also need to grant more on-disk permissions than we were comfortable with, and restricting its reach could be an engineering challenge in itself.\n\nThe same can be said for `scp` and its variants. In the end, we decided to use Git itself and benefit from its internal protocol. This was a no-brainer and a very easy decision to make. We understood the protocol well enough and we already had the required safeguards in place. All we needed was a slightly different authentication mechanism for the node-to-node synchronization.\n\n#### 4. 
Always push code to the primary, pull code from anywhere\n\nWhen we started Geo, there was no bundled Git support for multi-repository \"transactional\" replication, nor information on how to implement one.\n\nWe quickly figured out that implementing something along those lines would require either a *global lock* or a variant of [RAFT](https://raft.github.io/)/[PAXOS](https://en.wikipedia.org/wiki/Paxos_(computer_science)) on top of Git's internal protocol.\n\nBoth solutions have their downsides and tradeoffs, and adding to that the time and effort required to build one correctly, we opted for the simplest implementation: always push to the primary, notify secondaries that repository data changed, and have the secondaries fetch the changes. This is also in line with our motto of [Boring Solutions](https://handbook.gitlab.com/handbook/values/#efficiency).\n\nThe initial repository synchronization is no different than doing a `git clone \u003Cremote> --mirror`. The same idea goes for the repository updates: they behave very similarly to a `git fetch \u003Cremote> --prune`. The difference is that we need to replicate additional internal metadata as well, which is not normally exposed to a regular user.\n\n![GitLab Geo - MVP Synchronization Architecture Diagram](https://about.gitlab.com/images/blogimages/how-we-built-geo/geo-architecture-mvp.png){: .medium.center}\n\n#### 5. Don’t replicate Redis data between nodes\n\nWe initially thought we could replicate Redis as well as the main database in order to share cached data, session information, etc. This would allow us to implement a Single Sign-On solution very easily, and by reusing the cache we would speed up the initial page load.\n\nAt that time Redis only supported **Leader** to **Follower** replication mode, and even though it is usually super fast when used in a local network, the fact remains that replicating data across disparate geographical locations can add significant latency.\n\nThis additional latency would undermine the initial objective of simplifying the Single Sign-On implementation. If you simply log in on the primary node and get redirected to the secondary, chances are that the session information would still not be available on the secondary node due to the replication latency.\n\nThat would eventually fix itself by redirecting back and forth, but if the latency is significant enough, your browser will terminate the connection based on its redirect loop prevention feature. Another downside of this approach is that it creates a hard dependency on the primary node being online; otherwise the secondary node would be inaccessible and/or completely broken.\n\nIn addition to all these issues, we needed an additional Redis instance that supports writes, in order to persist jobs to our Jobs system on the secondary node.\n\nSo it made sense, in the end, to give up on the idea of replicating Redis, and we started looking for a solution to the authentication problem.\n\n#### 6. Authenticate on the primary node only\n\nBecause we can’t write to the main database on secondary nodes, any auditing logs, brute force protection mechanisms, password recovery tokens, etc. can’t have their data and state persisted on secondary nodes. The only viable solution then is to authenticate on the primary and redirect the user to the secondary.\n\nThis decision also helped with the integration of any company-specific authentication systems. 
If a company uses internal authentication based on LDAP, CAS or SAML, for instance, then they wouldn't have to replicate that system to the other location or configure firewall rule exceptions to accept traffic over the internet.\n\n#### 7. Implement Single Sign-On and Single Sign-Off using OAuth\n\nWith the previous Redis limitations in mind, we looked into alternatives to implement the authentication. We had to choose between CAS and an OAuth-based solution. As we already had OAuth Provider support inside GitLab, we decided to go with that.\n\nBasically, for any Geo node configured in the database we also have a corresponding OAuth application inside GitLab, and whenever a new user tries to log into a Geo node, they get redirected to the primary node and need to \"allow\" the \"Geo application\" to have access to their account credentials at the first login.\n\nThe shortcoming here is that if you are not logged in already and the primary goes down, you can't log in again until the primary node connectivity issue is fixed.\n\n#### 8. Build a read-only layer on the application side to prevent accidents\n\nWe needed this safeguard in place in case any required subsystem was misconfigured. With the read-only layer, we can prevent the instance from diverging from the primary in a non-recoverable way. It's also this layer that prevents anyone from pushing a repository change to the secondary node directly.\n\n#### 9. Don’t replicate any user attachments yet, just redirect to the primary\n\nInstead of trying to replicate user attachments at this stage, we decided to simply rewrite the URLs to point the resource at the primary node. This allowed us to iterate faster and still provide a decent experience to the end users.\n\nThey would still enjoy faster access to the repository data and have the web UI rendering the content from a closer location, with the exception of the issue/merge request attachments, avatars, etc., which were still being fetched from the primary. But as these are also highly cacheable, the impact is minimal.\n\nThis was the initial foundation that allowed us to validate Geo as a viable solution. Later on, we took care of replicating the missing data as well.\n\n### Bonus trivia\n\nThe term **Geo** came only after a while: the feature was previously named **GitLab RE** (*Read-Only Edition*), followed by **GitLab RO** (*Read Only*), before getting its final name: **GitLab Geo**.\n\n## Phase 2: First-generation synchronization mechanism\n\nWith the MVP implementation done, we needed to pave the way for a stable release. The first part we decided to improve was the notification mechanism for pending changes. During the MVP, we built a custom API endpoint and a buffered queue. That queue was also optimized to store only unique, idempotent events. If a project received three push events in the last few seconds, we only needed to store and process one event notification.\n\nWe decided that instead of building our own custom notification \"protocol\" and implementing some early optimizations, we should leverage existing GitLab internal capabilities: our own webhooks system.\n\n![GitLab Geo - First Generation Synchronization Architecture Diagram](https://about.gitlab.com/images/blogimages/how-we-built-geo/geo-architecture-first-gen.png){: .medium.center}\n\nBy taking that route, we would be forced to \"[drink our own champagne](https://en.wikipedia.org/wiki/Eating_your_own_dog_food#Alternative_terms)\" and, as a result, improve our existing functionality. 
That decision actually resulted in improvements to our system-wide webhooks in a few ways. We added new system-wide webhook events, expanded the granularity of the information available, and fixed some performance issues.\n\nWe've also improved the security of our webhooks implementation by adding ways of verifying that a notification came from a trusted source. Previously, the only way to do that was to whitelist the originating IP address.\n\nThis security limitation was not present in the MVP version, as we reused the admin personal token as the authorization mechanism for the API, which is also not ideal, but better than the previous webhook implementation.\n\n
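As an aside, GitLab webhooks can carry a shared secret token, sent in the `X-Gitlab-Token` header; a receiver can verify it along these lines (a minimal Go sketch, not GitLab's internal code):\n\n```go\npackage main\n\nimport (\n    \"crypto/subtle\"\n    \"log\"\n    \"net/http\"\n)\n\nconst secret = \"change-me\" // shared out of band with the sender\n\n// verifyToken rejects notifications that don't carry the shared\n// secret, instead of trusting the originating IP address alone.\nfunc verifyToken(next http.HandlerFunc) http.HandlerFunc {\n    return func(w http.ResponseWriter, r *http.Request) {\n        got := r.Header.Get(\"X-Gitlab-Token\")\n        if subtle.ConstantTimeCompare([]byte(got), []byte(secret)) != 1 {\n            http.Error(w, \"invalid token\", http.StatusForbidden)\n            return\n        }\n        next(w, r)\n    }\n}\n\nfunc main() {\n    http.HandleFunc(\"/hook\", verifyToken(func(w http.ResponseWriter, r *http.Request) {\n        w.WriteHeader(http.StatusOK) // process the event here\n    }))\n    log.Fatal(http.ListenAndServe(\":8000\", nil))\n}\n```\n\nI consider this to be the first generation of the synchronization mechanism that was used in the wild. It had a few characteristics: it reacted almost in real time to small updates, and webhooks were fast and parallelizable enough to be used at the scale we wanted to support.\n\nAs the very first version of Geo was only concerned with getting repositories available and in sync from one location to the other, that's where we focused all of our efforts. At that time, setting up a new Geo node required an almost identical clone of the primary to be available in advance. That included not only replicating the database but also *rsyncing* the repositories from one node to the other. For improved consistency, we initially required a *stop the world* phase in order to not lose changes made between when the backup started and when the secondary node got completely set up.\n\nWhile this was still closer to a barebones solution, it already provided value for remote teams working together in a shared repository or simply in any project that needed to synchronize code between different locations. We had a few customers trying it out and evaluating the potential, but it was still not ready for production use as we were still missing a lot of functionality.\n\nThe *stop the world* phase previously mentioned got phased out later with the help of improved setup documentation. Much later, a good chunk of the initial cloning step got simplified by leveraging some improvements in the next-generation synchronization and by introducing a backfilling mechanism.\n\n### First-generation synchronization pitfalls\n\nWhile our first-generation solution worked fine for the highly active repositories, the use of webhooks as a notification mechanism had some really obvious drawbacks.\n\nIf, for any reason, the notification failed to be delivered, it had to be rescheduled and retried. Also, because we were using our internal Jobs system to dispatch the webhooks, having a node go dark for a few hours meant our Jobs system would be busy retrying operations against an unreachable destination for at least that same amount of time.\n\nDepending on the volume of data and how long it had been accumulating changes, that could even fill up the Redis instance disk storage. If that ever happened we would have to resync the whole instance again and start from scratch.\n\nWe improved the retry mechanism with custom Geo logic to alleviate the problem, but it was clear to us that this was not going to be a viable solution for a Generally Available (*stable*) release.\n\nAlso, because of the backoff algorithm in the retry logic, in conjunction with the asynchronous nature of the system, important changes could take a long time to replicate, especially in less active projects. 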
The busiest projects were less affected, as any update to the repository would bring it to the current state rather than to the state at the time the update notification was issued. And because such a project receives many updates during the day, it also generates many notification events.\n\nAny implementation misstep in sending the webhook, or in receiving and processing it on the other side, could mean we would lose that information forever. This was again not a major issue with highly active projects, as they would eventually receive a new, valid update notification which would sync them to the current state, but the outliers could miss it until someone noticed or another update arrived much later.\n\nWe also wanted to make Geo a viable Disaster Recovery solution in the long term, so missing updates without a way to recover from them was not an option.\n\n## Phase 3: Second-generation synchronization mechanism\n\nWe started looking for alternative ways of notifying the secondary nodes and also considered switching to other standalone queue systems instead. We were also worried about the lack of control over the order in which operations would happen in a parallel and asynchronous replication system, and about the effect that had on the data on disk.\n\nA few examples of situations that can happen because of the parallelism and the async nature of it:\n\n1. A project removal event can be processed before a project update for the same repository\n1. Renaming a project, creating a project with the new name, and sending new content to it, if processed in an incorrect order, can lead to temporary data loss\n\nThere was also the case where the notification arrived before the database had replicated the required data: for example, when a node receives the notification for a new project's creation, but the database doesn't have the project yet.\n\nThat required the secondary node to keep a \"mailbox\" until the received events were ready to be processed. As they were basically Jobs, that meant retrying until the job succeeded.\n\nConsidering all the complexity we had brought to the application layer, we investigated a few standalone queue systems to which we could offload the burden, but ultimately decided to build an event queue mechanism in PostgreSQL instead, as it had three important advantages:\n\n#### 1. No extra dependencies\nWe were already replicating the database, so there is no need to install, configure, and maintain another process, worry about backing up yet another component, integrate it into our Omnibus package, and provide support for our users.\n\n#### 2. No more delayed processing\nIf the event arrives on the other side, the data associated with it will already be there as well. We can also guarantee consistency with transactions and repeat less information than with the webhooks implementation.\n\n#### 3. Easy to retry/restore from backup or in a disaster situation\nWith a standalone queue system, to have a consistent backup solution you either need some sort of \u003Cabbr title=\"Write-Ahead Logging\">WAL\u003C/abbr> files that could help rebuild a consistent state between the systems, or you have to do backups in a \"stop the world\" way; otherwise, you may lose data.\n\n### Our implementation\n\nWe took inspiration from how other log-based replication systems work (like the database itself) and implemented it with a central table as the main source of truth and a few others to hold bookkeeping for specific event types. Any relevant information we used to ship with the webhook notification is now part of this implementation, with extras to support the missing replicable events.\n\nOn the secondary node, these new tables are read by a specific daemon (we call it the Geo Log Cursor), and as the name suggests, it holds a persistent pointer to the last processed event. This allows us to also report the state of replication and monitor whether replication is broken. We also made it highly available, so you can boot up one daemon as **Active** and keep a few extras as **Standby**. If the Active daemon stops responding for a specified amount of time, a new election starts and one of the Standbys takes over as the new Active.\n\n
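A heavily simplified sketch of the log-cursor idea, in Go with hypothetical `geo_events` and `event_cursor` tables (the real Geo Log Cursor is considerably more sophisticated):\n\n```go\npackage main\n\nimport (\n    \"database/sql\"\n    \"log\"\n    \"time\"\n\n    _ \"github.com/lib/pq\" // PostgreSQL driver\n)\n\nfunc main() {\n    db, err := sql.Open(\"postgres\", \"postgres://localhost/geo_tracking?sslmode=disable\")\n    if err != nil {\n        log.Fatal(err)\n    }\n    for {\n        // The persistent pointer: the ID of the last event we replayed.\n        var cursor int64\n        if err := db.QueryRow(\"SELECT last_event_id FROM event_cursor\").Scan(&cursor); err != nil {\n            log.Fatal(err)\n        }\n        rows, err := db.Query(\n            \"SELECT id, payload FROM geo_events WHERE id > $1 ORDER BY id LIMIT 100\", cursor)\n        if err != nil {\n            log.Fatal(err)\n        }\n        for rows.Next() {\n            var id int64\n            var payload string\n            if err := rows.Scan(&id, &payload); err != nil {\n                log.Fatal(err)\n            }\n            log.Printf(\"replaying event %d: %s\", id, payload)\n            // Advance the cursor so a restart resumes where we left off.\n            if _, err := db.Exec(\"UPDATE event_cursor SET last_event_id = $1\", id); err != nil {\n                log.Fatal(err)\n            }\n        }\n        rows.Close()\n        time.Sleep(time.Second)\n    }\n}\n```\n\n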
The second part of the new system requires a persistence layer on the secondary node to keep any synchronization state and metadata. This was done by using another PostgreSQL instance.\n\nWe couldn’t reuse the same main instance, as we were replicating with *Streaming replication* mode. With *Streaming replication*, the whole instance is replicated, and you can’t perform any changes on it. The alternative that allows replicating and writing to the same instance is *Logical replication* mode, but at that time, there was no official *Logical Replication* support available in the PostgreSQL versions we supported (PgLogical was also not a viable alternative back then).\n\nWith the new persistence layer (we call it the *Geo Tracking Database*), we had the foundations in place to actively compare the \"desired vs actual\" state, and find missing data on any secondary instance. We built a more robust backfilling mechanism based on that as well.\n\nQuerying between the two database instances (the replicated secondary and the Tracking Database) was made much faster and more scalable by enabling Postgres FDW ([Foreign Data Wrapper](https://www.postgresql.org/docs/9.6/static/postgres-fdw.html)). That allowed us to query data using a few **LEFT JOIN** operations across the two instances, instead of polling with multiple queries from the application layer against the two databases in isolation.\n\n![GitLab Geo - Second Generation Synchronization Architecture Diagram](https://about.gitlab.com/images/blogimages/how-we-built-geo/geo-architecture-second-gen.png){: .medium.center}\n\n### Other improvements\n\nAnother important shortcoming we fixed was how we replicated SSH keys. This was technical debt we had needed to pay since the first implementation. Historically, GitLab built the SSH authorization mechanism as with many other Git implementations: by writing each user-provided SSH key to the `AuthorizedKeys` file on the server and pointing each one to our [gitlab-shell](https://gitlab.com/gitlab-org/gitlab-shell) application.\n\nThis implementation allowed us to authenticate the authorized users, and because we control how the Shell application is invoked, we can pass a specific key ID to it, which can be used later to identify the user in our database and authorize/deny operations on specific repositories.\n\nThe problem with this approach, in general, is that the bigger the user base, the slower the initial request will be, as OpenSSH has to perform a scan of the whole file (**O(N)** complexity). 
With Geo, it's not just about speed: any delay in updating this file, either to add a new key or to revoke an existing one, is very undesirable.\n\nWhen we decided to fix that, we did so for both Geo and GitLab Core, using an interesting feature present in newer versions of OpenSSH (6.9 and above) that allows overriding the `AuthorizedKeys` step, switching from reading the keys from a file to invoking a specified command instead (*O(1)* complexity). You can read more about it [in the documentation here](https://docs.gitlab.com/ee/administration/operations/fast_ssh_key_lookup.html#doc-nav).\n\nWe fixed another shortcoming around repository synchronization, switching from Git over SSH to Git over HTTPS. The initial motivation was to simplify the setup steps, but that decision also allowed us to shape the synchronization differently when it originated from a Geo node versus a regular request. Internally we store additional metadata in the repository, and also commits that may no longer exist in your regular branches but were part of a previous merge request or had user comments associated with them.\n\nSwitching fully to HTTP(S) also made it simpler to run our development instances locally with [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit), which helped to improve our own internal development process as well.\n\n## Phase 4: Third-generation synchronization and the path to a Disaster Recovery solution\n\nWhile still working on Phase 3, we discovered another major limitation in how we stored files on disk. GitLab, for historical reasons, stored repositories and file attachments in a disk structure that mirrored the base URL routes. For the group and project `gitlab-org/gitlab-ce` there would be a path on disk that included `gitlab-org/gitlab-ce` as part of it. The same is true for file attachments.\n\nKeeping both the database and disk in sync, even without considering Geo replication, means that any time a project is renamed, several things have to be renamed on disk as well.\n\nThis is not only slow and error prone: what should we do if something fails to rename in the middle of the \"transaction?\" This is also problematic when replication comes into play, as we again depend on processing events in the correct order or risk a temporarily inconsistent state.\n\nWe tried to find a solution to the problems around the order of execution of the events, and we came up with three ideas:\n\n1. **Find or build a queue system that is guaranteed to process things in the same order they were scheduled**\n2. **Detect and recover from any replication failure or data corruption**\n3. **Make every replication operation idempotent, removing the queue-ordering requirement completely**\n\nThe first one was easily ruled out: even if we switched to a queue system with that type of guarantee, it would either be slow, due to the lack of parallelism required to guarantee ordering, or extremely complex and hard to use, as it would require extra care to provide the same guarantees while also working in parallel.\n\nWe found no system that satisfied our needs, and even if we had considered a standalone queue solution, we would lose the Postgres advantage from the previous generation: having both the main database and the queue system always in sync.\n\nHaving ruled out the first idea, we considered the second, detecting and recovering from failures and data corruption, as we concluded we needed it for *Disaster Recovery* anyway. 
Any robust *Disaster Recovery* solution needs to guarantee that the data it is holding is exactly the data it's supposed to have. If, for any reason, that data gets corrupted or someone removes it from disk, the solution needs a way of detecting that and restoring the data to the desired state.\n\nTo achieve that, we built a robust verification mechanism that generates a checksum of the state of the repository, which is stored in a separate table on the primary node. That table gets replicated to secondary nodes, where another checksum is also calculated (and stored in the Tracking Database). If both checksums match, we know the data is consistent. The checksum is recalculated automatically when an update event is processed, but can also be triggered manually.\n\n![Screen Capture - Repository Verification Status](https://about.gitlab.com/images/blogimages/how-we-built-geo/verification-status-primary.png){: .medium.center}\n\nWe used that mechanism to validate all repositories on GitLab.com when we successfully [migrated from Azure to GCP](/blog/gcp-move-update/) last month.\n\nThe verification mechanism alone is not enough, though: while it gives us the guarantees we need, we can do better. That's why we decided to implement the third idea as well and make every replication operation idempotent, removing any situation where processing events in the incorrect order would put data in a temporarily inconsistent state.\n\nWe are calling that solution [Hashed Storage](https://docs.gitlab.com/ee/administration/repository_storage_types.html). This is a complete rewrite of how GitLab stores files on disk. Instead of reusing the paths present in the URLs, we hash the internal ID and derive the disk path from that hash. With Hashed Storage, renaming a project or moving it to a new group requires only the database operations to be persisted, as the location on disk never changes.\n\n![Hashed Storage and Legacy Storage example](https://about.gitlab.com/images/blogimages/how-we-built-geo/hashed-storage-disk-path-example.png){: .medium.center}\n\nBy making the paths on disk immutable and non-conflicting, any `create`, `move` or `remove` operations can happen in any order, and they will never put the system in an inconsistent state. Replicating a project rename, or moving a project from one group/owner to another, requires only the database change to be propagated to take full effect on a secondary node.\n\n
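As an illustration of the scheme (a sketch of the idea rather than GitLab's exact implementation), an immutable path can be derived from the project's numeric ID like this:\n\n```go\npackage main\n\nimport (\n    \"crypto/sha256\"\n    \"encoding/hex\"\n    \"fmt\"\n)\n\n// hashedPath derives an on-disk path from a project's numeric ID, in\n// the spirit of Hashed Storage. The ID never changes, so neither does\n// the path, no matter how often the project is renamed or moved.\nfunc hashedPath(projectID uint64) string {\n    sum := sha256.Sum256([]byte(fmt.Sprintf(\"%d\", projectID)))\n    h := hex.EncodeToString(sum[:])\n    return fmt.Sprintf(\"@hashed/%s/%s/%s.git\", h[0:2], h[2:4], h)\n}\n\nfunc main() {\n    // A rename only touches the database; this path stays put.\n    fmt.Println(hashedPath(93))\n}\n```\n\n## What to expect from Geo in the near future\n\nImplementing Geo has been an important effort at GitLab that involved many different areas. It is a crucial infrastructure feature that allowed us to migrate from one cloud provider to another. 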
We also believe it's an important component in supporting the needs of many organizations today, from providing peace of mind regarding data safety in the event of a disaster, to easing the burdens of distributed teams across the globe.\n\nWe've been using the feature ourselves, and this allowed us to stress-test the biggest and most challenging GitLab installation, GitLab.com, making sure it will work just as well for any other customer.\n\nOver the upcoming months we will be focusing on the following items:\n\n* Release a push proxy for Geo secondary nodes: [Pull and push from the same remote transparently](https://gitlab.com/groups/gitlab-org/-/epics/124)\n* Release [Hashed Storage as *Generally Available*](https://gitlab.com/groups/gitlab-org/-/epics/75)\n* Improve configuration: We want to reduce the steps and make it [simpler via automating most steps](https://gitlab.com/groups/gitlab-org/-/epics/367)\n* Improve the verification step: [Improve the signals we use for the checksum](https://gitlab.com/gitlab-org/gitlab-ee/issues/5196)\n* [Improve the Geo UX and UI](https://gitlab.com/groups/gitlab-org/-/epics/369)\n* Keep improving performance and reliability\n* Support replication of [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-ee/issues/4611) and the internal [Docker Registry](https://gitlab.com/gitlab-org/gitlab-ee/issues/2870)\n\nCover photo by [NASA](https://unsplash.com/photos/Q1p7bh3SHj8) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[749,915],{"slug":7060,"featured":6,"template":678},"how-we-built-gitlab-geo","content:en-us:blog:how-we-built-gitlab-geo.yml","How We Built Gitlab Geo","en-us/blog/how-we-built-gitlab-geo.yml","en-us/blog/how-we-built-gitlab-geo",{"_path":7066,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7067,"content":7073,"config":7079,"_id":7081,"_type":16,"title":7082,"_source":17,"_file":7083,"_stem":7084,"_extension":20},"/en-us/blog/inside-gitlab-security-dashboards",{"title":7068,"description":7069,"ogTitle":7068,"ogDescription":7069,"noIndex":6,"ogImage":7070,"ogUrl":7071,"ogSiteName":692,"ogType":693,"canonicalUrls":7071,"schema":7072},"Security dashboards secure applications at DevOps speed","GitLab Security Dashboards enable security professionals to view vulnerabilities across a project. Here’s an inside look.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678710/Blog/Hero%20Images/inside-gitlab-security-dashboards.jpg","https://about.gitlab.com/blog/inside-gitlab-security-dashboards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How can teams secure applications at DevOps speed? Security Dashboards are here to help.\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2018-09-14\",\n      }",{"title":7074,"description":7069,"authors":7075,"heroImage":7070,"date":7056,"body":7077,"category":14,"tags":7078},"How can teams secure applications at DevOps speed? Security Dashboards are here to help.",[7076],"Cindy Blake","\nBusiness survival today depends on a radically faster DevOps lifecycle, but how can teams secure applications at DevOps speed? 
It’s a thorny problem for a number of reasons: applications are a prime target for cyber attacks; most [application security](/topics/devsecops/) tools are resource intensive, requiring integration of both technology and processes; and testers face the dilemma of when and how often to test code that is iteratively changed right up until it’s deployed. Many are faced with weighing the need to test each iteration against the speed and cost of doing so, while the possibility of a rollback looms in the case of an unforeseen security vulnerability.\n\n>Many are faced with weighing the need to test each iteration against the speed and cost of doing so\n\nWe know that shifting left and discovering vulnerabilities earlier in the development process is important, but it’s tough to find the perfect balance, where teams can be confident they’re truly creating business value and not becoming a business inhibitor. It’s clear that our existing application security tools are colliding with modern development. So what if you could scan all code, every time, during development, using fewer tools instead of more, and have developers and operations on the same page instead of at odds?\n\n### Built-in security products\n\nIt’s going to take a fundamental shift by companies towards proactive security. With security issues reported directly in merge requests, one license cost for integrated security, and zero context switching to proactively secure applications, we believe GitLab can help get you there.\n\nUsing multiple tools forces developers to switch away from their primary objective of developing code, or requires integrated workflows with security pros. We believe successful tools will add high value while minimizing distraction for engineers. With GitLab, [SAST](https://docs.gitlab.com/ee/user/application_security/sast/), [DAST](https://docs.gitlab.com/ee/user/application_security/dast/), [container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/), [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), and [license management](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html) are all built in. Because there’s one tool for the software development lifecycle, you can automatically run tests on all code commits, early in the development process.\n\n### Security Dashboard demo\nIn 11.1, [we shipped Security Dashboards](/releases/2018/07/22/gitlab-11-1-released/) to help serve security professionals. Traditionally we’ve focused on the developer, but the Security Dashboard is meant to enable security professionals to view vulnerabilities across a project. 
Here’s a quick look at our first iteration of the Security Dashboard:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/U2_dqwTRUVk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nKeep an eye out for [improvements](https://gitlab.com/gitlab-org/gitlab-ee/issues/6709), and let us know what you think by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\nCover photo by [Christian EM](https://unsplash.com/photos/J7EUjSlNQtg) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[894,749,915,1307],{"slug":7080,"featured":6,"template":678},"inside-gitlab-security-dashboards","content:en-us:blog:inside-gitlab-security-dashboards.yml","Inside Gitlab Security Dashboards","en-us/blog/inside-gitlab-security-dashboards.yml","en-us/blog/inside-gitlab-security-dashboards",{"_path":7086,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7087,"content":7093,"config":7099,"_id":7101,"_type":16,"title":7102,"_source":17,"_file":7103,"_stem":7104,"_extension":20},"/en-us/blog/the-road-to-gitaly-1-0",{"title":7088,"description":7089,"ogTitle":7088,"ogDescription":7089,"noIndex":6,"ogImage":7090,"ogUrl":7091,"ogSiteName":692,"ogType":693,"canonicalUrls":7091,"schema":7092},"GitLab no longer requires NFS: The road to Gitaly v1.0","How we went from vertical to horizontal scaling without depending on NFS by creating our own Git RPC service.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670092/Blog/Hero%20Images/road-to-gitaly.jpg","https://about.gitlab.com/blog/the-road-to-gitaly-1-0","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The road to Gitaly v1.0 (aka, why GitLab doesn't require NFS for storing Git data anymore)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Zeger-Jan van de Weg\"}],\n        \"datePublished\": \"2018-09-12\",\n      }",{"title":7094,"description":7089,"authors":7095,"heroImage":7090,"date":7096,"body":7097,"category":14,"tags":7098},"The road to Gitaly v1.0 (aka, why GitLab doesn't require NFS for storing Git data anymore)",[4476],"2018-09-12","\nIn the early days of [GitLab.com](https://gitlab.com), most of the application,\nincluding Rails worker processes, Sidekiq background processes, and Git storage,\nall ran on a single server. A single server is easy to deploy to and maintain.\nThe same structure is what most smaller GitLab instances still use for their\nself-managed [Omnibus](https://docs.gitlab.com/omnibus/) installation. Scaling\nis done vertically, meaning adding more RAM, CPU, and disk space.\n\n## Moving from vertical to horizontal scaling\n\nSoon we ran out of options to continue scaling the system vertically, and we had\nto move to scaling horizontally by adding new servers. To have the repositories\navailable on all the nodes, NFS (Network File System) was used to mount them on each application\nserver and background worker. NFS is a well-known technology for sharing file\nsystems across a network. For each server, each storage node needed to be\nmounted. The advantage: GitLab.com could keep adding more servers and scale. However, NFS\nhad multiple disadvantages too: there is less visibility into what type of file\nsystem operation is being performed. Even worse, an outage on a single NFS storage node\ntook the whole site down. 
On the other hand, Git operations\ncan be quite CPU/IOPS intensive too, so we began a balancing act between adding more nodes,\nand thus reducing reliability, versus scaling nodes vertically.\n\n## Considering NFS alternatives\n\nOver two years ago, we started to look for alternatives. One of the first ideas\nwas to remove the dependency on NFS with [Ceph](https://ceph.com/).\nCeph is a distributed file system that was meant to replace NFS in an\narchitecture like ours. Like NFS, this would solve our scaling problem on the\nsystem level, meaning that little to no changes would be required to GitLab as\nan application. However, running a Ceph cluster in the cloud didn't have the\nperformance characteristics that were required. Briefly we flirted with the idea\nof [moving away from the cloud][no-cloud], but this would have had major implications\nfor our own infrastructure team, and given that many of our customers _do_ run in\nthe cloud, [we decided to stay in the cloud][yes-cloud].\n\n[no-cloud]: /blog/why-choose-bare-metal/\n[yes-cloud]: /2017/03/02/why-we-are-not-leaving-the-cloud/\n\n## Introducing Gitaly\n\nSo it was clear that the application needed to be redesigned, and a new service\nwould be introduced to handle all Git requests. We named it\n[Gitaly](https://gitlab.com/gitlab-org/gitaly).\n\n![Gitaly Architecture Diagram](https://about.gitlab.com/images/gitaly_arch.png){: .large.center}\n*\u003Csmall>The planned architecture at the project start\u003C/small>*\n\nAs the diagram shows, the new Git server would have a number of distinct clients.\nTo make sure the protocol for the server and its clients is well defined,\n[Protocol Buffers][protobuf] was used. The client calls are handled by\nleveraging [gRPC][grpc]. Combined, they allowed us to iteratively add RPCs and\nmove away from NFS, in favor of an HTTP boundary. With the technologies chosen,\nthe migration started. The ultimate goal: v1.0, meaning no disk access was\nrequired to the Git storage nodes for [GitLab.com](https://gitlab.com).\n\nShipping such an architectural change should not influence the performance, nor\nthe stability of the self-managed installations of GitLab, so for each RPC a [feature\nflag](https://docs.gitlab.com/ee/development/feature_flags/index.html) gated the use of it. When the RPC had gone through a series of tests on both\ncorrectness and performance impact, the gate was removed. To determine stability we used\n[Prometheus](https://docs.gitlab.com/ee/administration/monitoring/prometheus/) for monitoring and the ELK stack for sifting through massive numbers of structured log messages.\n\nThe server was written in Go, while the application is a large Rails monolith.\nRails had a great amount of code that was still very valuable. This code got\nextracted to the `lib/gitlab/git` directory, allowing easier vendoring. The idea\nwas to start a sidecar next to the Go server, reusing the old code. About once a week the\ncode would be re-vendored. This allowed Ruby developers on other teams to\nwrite code once, and ship it. Bonus points could be earned if [the boilerplate code][gitaly-ruby]\nwas written to call the same function in Ruby!\n\n[protobuf]: https://developers.google.com/protocol-buffers/\n[gitaly-ruby]: https://gitlab.com/gitlab-org/gitaly/blob/232c26309a8e9bef61262ccd04a8f0ba75e13d73/doc/beginners_guide.md#gitaly-ruby-boilerplate\n[grpc]: https://grpc.io/\n\nThe new service wasn't all sunshine and rainbows though, at times it felt like\nthe improved visibility was hurting our ability to ship. 
For example, it became\nclear that the illusion of an attached disk created\n[N + 1 problems][rails-eager-loading]. And even though this is a well-known problem\nin Ruby on Rails, the tools to combat it are all tailored toward using it with\nActiveRecord, Rails' ORM.\n\n[rails-eager-loading]: https://guides.rubyonrails.org/active_record_querying.html#eager-loading-associations\n\n## Nearing v1.0\n\nWith each RPC introduced, v1.0 was getting closer and closer. But how could we be\nsure everything was migrated before unmounting all NFS mount points? A trip\nswitch was introduced, guarding the details required to get to the full path of each\nrepository. Without this data there was no way to execute any Git operation\nthrough NFS. Luckily, the trip switch never went off, so it was clear NFS\nwasn't being used. The next step was unmounting on our staging environment! This, too, was very\nuneventful. After the volumes had been unmounted for a full week with no\nindication of unexpected errors, the logical next step was our production instance.\n\nDays later we started rolling out these changes to production: first the\nbackground workers were unmounted, then we moved on to higher impact services. At\nthe end of the day, all drives were unmounted without customer impact.\n\n## What's next?\n\nSo, where is this v1.0 tag? We didn't tag it, and I don't think we will. v1.0 is\na state for our Git infrastructure, and a goal for the team, rather than the code base.\nThat being said, the next mental goal is allowing all customers to run without NFS.\nAt the time of writing, some features, like administrative tasks, aren't using Gitaly just\nyet. These are slated for [v1.1][gitaly-11], our next objective.\n\nWant to know more about our Gitaly journey? 
Read about [how we're making your Git data highly available with Praefect](/blog/high-availability-git-storage-with-praefect/) and [how a fix in Go 1.9 sped up our Gitaly service by 30x](/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/).\n{: .alert .alert-info .text-center}\n\n[gitaly-11]: https://gitlab.com/groups/gitlab-org/-/epics/288\n\nPhoto by [Jason Hafso](https://unsplash.com/photos/8Sjcc4vExpg) on Unsplash\n{: .note}\n",[915,702,704],{"slug":7100,"featured":6,"template":678},"the-road-to-gitaly-1-0","content:en-us:blog:the-road-to-gitaly-1-0.yml","The Road To Gitaly 1 0","en-us/blog/the-road-to-gitaly-1-0.yml","en-us/blog/the-road-to-gitaly-1-0",{"_path":7106,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7107,"content":7113,"config":7118,"_id":7120,"_type":16,"title":7121,"_source":17,"_file":7122,"_stem":7123,"_extension":20},"/en-us/blog/deep-dive-into-gitlabs-ux-design-process",{"title":7108,"description":7109,"ogTitle":7108,"ogDescription":7109,"noIndex":6,"ogImage":7110,"ogUrl":7111,"ogSiteName":692,"ogType":693,"canonicalUrls":7111,"schema":7112},"A deep dive into GitLab's UX design process","The UX team shares how they communicate, plan, share, and tackle improvements one iteration at a time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678759/Blog/Hero%20Images/designwebcast.jpg","https://about.gitlab.com/blog/deep-dive-into-gitlabs-ux-design-process","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A deep dive into GitLab's UX design process\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-09-05\",\n      }",{"title":7108,"description":7109,"authors":7114,"heroImage":7110,"date":7115,"body":7116,"category":14,"tags":7117},[6768],"2018-09-05","\nThe [UX team](/handbook/product/ux/#ux-at-gitlab) recently gathered to share\nhow they collaborate in a fully remote environment. Our team of two UX researchers\nand nine UX designers spans eight countries and six time zones. In this webcast,\nthey discussed UX research, community contributions, and hiring, making it an\nexcellent resource in helping you learn more about\n[GitLab design](https://gitlab.com/gitlab-org/gitlab-design/#gitlab-design).\n\n### Watch the webcast\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6R64hHkkgtE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## What we covered\n\nThe UX team generously provided insight into their workflow and projects. Below\nare a few of our favorite takeaways.\n\n### Iteration\n\nAt GitLab, [iteration](https://handbook.gitlab.com/handbook/values/#iteration) means making the smallest\nthing possible and getting it out as quickly as possible, helping us reduce the\ncycle time and rapidly get feedback from users so that we can continue to improve\nquickly and efficiently. Planning too far ahead without getting real-world\nfeedback can cause you to build something that doesn't meet user needs.\n\n### UX Research\n\nThe goal of UX research is to understand the needs and concerns of users, often\nby observing how they interact with a product or by gathering data through\nvarious methods. At GitLab, we often use survey research, feasibility testing,\nuser interviews, and card sorting to understand our users. 
We discuss the\nresults with product managers to help us prioritize feedback and determine the\nnext steps to implement the findings.\n\n### GitLab Design System\n\nOne of the team's major initiatives last year was  the\n[GitLab Design System](https://design.gitlab.com/), which\nincludes content guidelines, usability patterns, foundational styles, and reusable\ncomponents. The team shifted its focus towards system thinking to create\nconsistency throughout the product and predictability across experiences. The UXers\nhave been working closely with our frontend team to implement our system\niteratively.\n\nEvery designer writes usage guidelines during every milestone and\npicks at least one issue within the issue tracker to contribute to the project.\nThe design system is open source, just like the rest of GitLab, so everyone is\nencouraged to question any of the decisions we've made or contribute by making\nthings clearer or adding missing content.\n\n### How you can contribute to GitLab’s UX designs\n\nAs an open source company, we believe in transparency, so we share almost\neverything we do, including source files, artifacts, deliverables, case studies,\n[UX research](https://gitlab.com/gitlab-org/ux-research#research-archive), and\nour findings. Being open source allows the community to learn from us, and for\nus to learn from the community. There are issues that have been\nlabeled '[Accepting merge requests](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=Accepting+merge+requests&label_name[]=UX)'\nand they need some UX work. Most of these are very small issues, making them the\nperfect starting point for first-time contributors. If you have an idea for a UX\nimprovement, we encourage you to create an issue using the feature proposal\ntemplate to describe the problem you're trying to solve and your proposed solution.\n\nOur UX researchers encourage community contributions, so if you're interested\nin exploring a research question, you're welcome to create an issue using a\nsearch proposal template in the\n[UX research project](https://gitlab.com/gitlab-org/ux-research#contributing).\nIf you’d like to help shape the future of GitLab, we’d love to invite you to\njoin [GitLab First Look](/community/gitlab-first-look/).\n\nThe UX team is happy to chat with you about your contribution,\nand we'll try to get back to you as soon as we can.\n\n### Join us!\n\nOur UX team is growing, and we'd love to work with you! We're currently looking\nfor three UX designers with an interest in our products. So, whether that's the\ndevelopment side or the operations side, we have a lot going on, and we have\nsomething for everyone. We're recruiting for specific teams, including Release\nand Verify, Monitor, and Secure teams. If you're interested in working with our\ntalented (and fun!) 
UX team, we encourage you [to apply](/jobs/)!\n\n[Cover image](https://unsplash.com/photos/MGBgTX1Zmpo) by [Chris Barbalis](https://unsplash.com/@cbarbalis), licensed\nunder [CC X](https://unsplash.com/license).\n{: .note}\n",[915,1144,5240,959],{"slug":7119,"featured":6,"template":678},"deep-dive-into-gitlabs-ux-design-process","content:en-us:blog:deep-dive-into-gitlabs-ux-design-process.yml","Deep Dive Into Gitlabs Ux Design Process","en-us/blog/deep-dive-into-gitlabs-ux-design-process.yml","en-us/blog/deep-dive-into-gitlabs-ux-design-process",{"_path":7125,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7126,"content":7131,"config":7136,"_id":7138,"_type":16,"title":7139,"_source":17,"_file":7140,"_stem":7141,"_extension":20},"/en-us/blog/gitlab-pages-update",{"title":7127,"description":7128,"ogTitle":7127,"ogDescription":7128,"noIndex":6,"ogImage":6819,"ogUrl":7129,"ogSiteName":692,"ogType":693,"canonicalUrls":7129,"schema":7130},"Update about GitLab Pages","If you are using GitLab Pages with a custom domain, you may need to update your DNS.","https://about.gitlab.com/blog/gitlab-pages-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update about GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David Smith\"}],\n        \"datePublished\": \"2018-08-28\",\n      }",{"title":7127,"description":7128,"authors":7132,"heroImage":6819,"date":7133,"body":7134,"category":14,"tags":7135},[2463],"2018-08-28","\n\nAfter completing our move to Google Cloud Platform (GCP) on August 11, 2018, GitLab.com traffic has been served from our new infrastructure in GCP. For GitLab Pages users, we left a proxy in place in Azure to be backwards compatible for those Pages users who had an A record pointing to the IP Address at our Azure location.\n\nWe had planned a graceful window to let people have time to migrate their DNS records.  In our [July GCP move update](/blog/gcp-move-update/), we referenced the new IP address at GCP that people should use.\n\nIn that transition, users should have moved their DNS records from 52.167.214.135 to 35.185.44.232.\n\nThis week, we started cleanup of parts of our now legacy Azure infrastructure. Unfortunately, that cleanup also caught up the Azure load balancer that had the old 52.167.214.135 IP address for the GitLab pages proxy. We quickly filed a ticket to see if we could reclaim the IP address, but could not be guaranteed that we could get it back when we rebuilt the load balancer. 
This post is to get the information out for those Pages users who have been affected by this change.\n\n### What you need to know:\n\nIf you are using GitLab Pages with a custom domain AND you have an A record in DNS that points to the old Azure IP, you will need to update your DNS:\n\n|from IP (old)|to IP (new)|\n|---|---|\n|52.167.214.135|35.185.44.232|\n",[728,873,1204,1002],{"slug":7137,"featured":6,"template":678},"gitlab-pages-update","content:en-us:blog:gitlab-pages-update.yml","Gitlab Pages Update","en-us/blog/gitlab-pages-update.yml","en-us/blog/gitlab-pages-update",{"_path":7143,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7144,"content":7149,"config":7154,"_id":7156,"_type":16,"title":7157,"_source":17,"_file":7158,"_stem":7159,"_extension":20},"/en-us/blog/gitlab-markdown-tutorial",{"title":7145,"description":7146,"ogTitle":7145,"ogDescription":7146,"noIndex":6,"ogImage":6292,"ogUrl":7147,"ogSiteName":692,"ogType":693,"canonicalUrls":7147,"schema":7148},"A 5-minute Markdown tutorial","New to GitLab? New to Markdown? Here's a quick explainer on using Markdown to format text all over GitLab.","https://about.gitlab.com/blog/gitlab-markdown-tutorial","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A 5-minute Markdown tutorial\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-08-17\",\n      }",{"title":7145,"description":7146,"authors":7150,"heroImage":6292,"date":7151,"body":7152,"category":14,"tags":7153},[4182],"2018-08-17","\n\nAt GitLab, we love [Markdown](https://docs.gitlab.com/ee/user/markdown.html) for providing a simple, clean way to add styling and formatting to plain text that's visible and repeatable across multiple applications. This means you can copy and paste the text without losing the formatting, and it makes [reviewing diffs](https://docs.gitlab.com/ee/development/merge_request_concepts/diffs/) easier, as you're still reviewing plain text with no hidden data.\n\n## What is Markdown?\n\nMarkdown is a lightweight markup language created by John Gruber in 2004. Markdown lets you add formatting elements to plain text documents. Since its creation, Markdown has become one of the world’s most popular markup languages. There are many web-based applications specifically built for writing in Markdown. Markdown syntax is designed to be readable and simple.\n\n## Markdown tutorial\n\nGitLab Product Marketing Manager [William Chia](/company/team/#thewilliamchia) recorded this five-minute Markdown tutorial for another GitLab team-member, so you can see how Markdown works within GitLab:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ix416lAYRSg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab Flavored Markdown\n\nGitLab uses [GitLab Flavored Markdown](https://gitlab.com/help/user/markdown#gitlab-flavored-markdown-gfm) (GFM) for other handy functionality not supported by standard Markdown. Here are a few useful things you can do with GFM:\n\n### Reference issues, commits, merge requests, or team members\n\nWhen you type `#12` (or any number) in an issue, it will automatically create a link to the corresponding issue in that project. 
You can also [easily reference other GitLab-specific items](https://gitlab.com/help/user/markdown#special-gitlab-references).\n\n### Autolink URLs\n\n You don't have to use the standard `[]()` format to create a link: just pasting the URL will [autolink it](https://gitlab.com/help/user/markdown#url-auto-linking).\n\n### Create diagrams and flowcharts\n\nIn [GitLab 10.3](/releases/2017/12/22/gitlab-10-3-released/#flow-charts-sequence-diagrams-and-gantt-diagrams-in-gitlab-flavored-markdown-gfm-with-mermaid) we added the ability to [generate diagrams and flowcharts](https://gitlab.com/help/user/markdown#mermaid) using [mermaid](https://mermaidjs.github.io/).\n\n### Quick actions\n\nOpen or close issues, reassign merge requests, add todos, unsubscribe from issues – these are just a few things you can do with GFM [quick actions](https://docs.gitlab.com/ee/user/project/quick_actions.html), all without leaving your keyboard. Just type `/` and a list of options will appear.\n\nThese are just a few examples of GFM – see the [Markdown documentation](https://docs.gitlab.com/ee/user/markdown.html) for a full list. We're adding to it all the time: as of our last release you can quickly [make an issue confidential](/releases/2018/07/22/gitlab-11-1-released/#confidential-issue-quick-action) right from the issue comment field. This was a community contribution, and we invite you to [contribute](/community/contribute/) functionality and quick actions you'd find useful too!\n\n## Benefits of using Markdown\n\nSome may be skeptical of using Markdown when there are other options – like a WYSIWYG editor. But the benefits of using markdown are hard to ignore:\n\n* Markdown is crazy versatile. It can be used for everything including (but not limited to) websites, notes, presentations, emails, and documents of all kinds.\n* Markdown isn’t picky about its operating system. You can create Markdown-formatted text on any device running any operating system.\n* Markdown can be used on the move, so to speak. Markdown-formatted text can be opened using virtually any application. You can also import your Markdown files into another Markdown application if you decide to make a change.\n* The Markdown text you create won’t become obsolete. Even if the application you’re using stops working down the line, you’ll still be able to read your Markdown-formatted text using a text editing application.\nThe fact that it is the backbone of so much web content means that you might be the odd one out if you DON’T use it.\n\n## How to get started with Markdown\n\nThere are a few ways you can learn about how to get started with Markdown.\n\nThe first is to check out online tutorials. You can find a number of resources on Markdown, including the [original guide by John Gruber](https://daringfireball.net/projects/markdown/) and a [Markdown Tutorial](https://www.markdowntutorial.com/) open-source website that you can use to try out Markdown in your web browser.\n\nOr, just try it out with the Notepad application on a device. Since Markdown is just plain text, you can write it in any text editor, such as Notepad. Save a file with the .MD file extension to make a proper Markdown file.\n\nThe second (and a highly encouraged) way to get the hang of Markdown is to check out some [free online Markdown editors](https://www.makeuseof.com/tag/online-markdown-editors/) to test the waters - many of which are great for just learning how to write in Markdown. 
Markdown editors like StackEdit and Dillinger can help your efforts to get started with Markdown.\n\nFor the most optimal Markdown experience, a writing app that's built for Markdown is typically the best way to go.\n",[2932,915],{"slug":7155,"featured":6,"template":678},"gitlab-markdown-tutorial","content:en-us:blog:gitlab-markdown-tutorial.yml","Gitlab Markdown Tutorial","en-us/blog/gitlab-markdown-tutorial.yml","en-us/blog/gitlab-markdown-tutorial",{"_path":7161,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7162,"content":7168,"config":7173,"_id":7175,"_type":16,"title":7176,"_source":17,"_file":7177,"_stem":7178,"_extension":20},"/en-us/blog/gitlab-auto-devops-in-action",{"title":7163,"description":7164,"ogTitle":7163,"ogDescription":7164,"noIndex":6,"ogImage":7165,"ogUrl":7166,"ogSiteName":692,"ogType":693,"canonicalUrls":7166,"schema":7167},"GitLab Auto DevOps in action","See how the only single application for the entire DevOps lifecycle helps you deliver better software, faster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664015/Blog/Hero%20Images/laptop.jpg","https://about.gitlab.com/blog/gitlab-auto-devops-in-action","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Auto DevOps in action\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-08-10\",\n      }",{"title":7163,"description":7164,"authors":7169,"heroImage":7165,"date":7170,"body":7171,"category":14,"tags":7172},[6626],"2018-08-10","\n\nBetter and faster. These two words best describe the production goals of the IT leaders and engineers building today’s cutting-edge software. And GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) can help them hit those goals while improving their overall business outcomes.\n\nAs the only single application for the complete [DevOps](/topics/devops/) lifecycle, GitLab Auto DevOps gives development teams all the tools they need to deliver secure, high-quality software at previously unattainable speeds. The secret sauce that makes Auto DevOps so effective is the way it automatically sets up the required integrations and pipeline needed to get your software out of the door faster. With Auto DevOps, your code is automatically tested for quality, scanned for security vulnerabilities and licensing issues, packaged and then set up for monitoring and deployment, leaving engineers with time to place more attention on creating a better product.\n\nThis may all make sense in theory, but as they say, a picture is worth 1,000 words. And it is [rumored](https://idearocketanimation.com/4293-video-worth-1-million-words/?) that video is worth 1.8 million words. With that being said, why not take a look at GitLab Auto DevOps in action? \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4Uo_QP9rSGM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWant to learn more about GitLab Auto DevOps? 
Check out our [documentation](https://docs.gitlab.com/ee/topics/autodevops/), [feature](https://docs.gitlab.com/ee/topics/autodevops/) and [product vision](/direction/) pages.\n\n\nCover photo by [Ash Edmonds](https://unsplash.com/photos/Koxa-GX_5zs) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n",[894,915,1328,727,1307,1286,749],{"slug":7174,"featured":6,"template":678},"gitlab-auto-devops-in-action","content:en-us:blog:gitlab-auto-devops-in-action.yml","Gitlab Auto Devops In Action","en-us/blog/gitlab-auto-devops-in-action.yml","en-us/blog/gitlab-auto-devops-in-action",{"_path":7180,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7181,"content":7187,"config":7193,"_id":7195,"_type":16,"title":7196,"_source":17,"_file":7197,"_stem":7198,"_extension":20},"/en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow",{"title":7182,"description":7183,"ogTitle":7182,"ogDescription":7183,"noIndex":6,"ogImage":7184,"ogUrl":7185,"ogSiteName":692,"ogType":693,"canonicalUrls":7185,"schema":7186},"How DevOps and GitLab CI/CD enhance a frontend workflow","The GitLab frontend team uses DevOps and CI/CD to ensure code consistency, fast delivery, and simple automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679026/Blog/Hero%20Images/frontendworkflow.jpg","https://about.gitlab.com/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How DevOps and GitLab CI/CD enhance a frontend workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"José Iván Vargas\"}],\n        \"datePublished\": \"2018-08-09\",\n      }",{"title":7182,"description":7183,"authors":7188,"heroImage":7184,"date":7190,"body":7191,"category":14,"tags":7192},[7189],"José Iván Vargas","2018-08-09","\nIt might seem like a lot of what we do on frontend is to make our lives easier,\nbut what I’ve learned in the past two years as a GitLab team-member and a community contributor\nis that if we make our lives easier, we can make a lot of customers happier, too.\nOver the years, I’ve experienced many changes at GitLab, from a change in processes\nto an increase in team members. From an early stage, the frontend team has been\ncommitted to continuous improvements, but working in a rapidly growing team\nrequired an investment in the way we work.\n\nWhen I joined GitLab we still used some of the default conventions that the [Rails\nframework](/blog/upgrade-to-rails5/) recommended for the frontend, and it helped us for quite a while, but\nthe more code we touched, the more code we needed to test and build for\nperformance, making it more challenging for us to maintain. The frontend team\nrealized that we needed a way to facilitate code consistency, fast delivery, and\nsimple automation, so we decided to incorporate [DevOps](/topics/devops/) and\n[CI/CD](/solutions/continuous-integration/) into our workflow.\n\n## Frontend DevOps and CI/CD workflow\n\nWe used CI in a few scenarios, including using linters to help write a consistent\nstyle of code throughout GitLab, but in the case of our JavaScript code, we\nrealized that building for performance and maintainability was becoming\nincreasingly difficult. So, we moved away from the\n[asset pipeline and utilized webpack](/blog/vue-big-plan/),\nwhich has given us a series of benefits. 
For example,  when we develop locally,\ndebugging code is now a breeze, and the jobs that are frontend related run on\nproduction-bundled code, ensuring a testing environment that closely resembles\nthat of a user.\n\nAfter CI, we publish code using DevOps by hosting it with\n[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/)). We’ve seen several projects benefit from\nadopting a DevOps model, including\n[GitLab SVG libraries](https://gitlab.com/gitlab-org/gitlab-svgs) and\n[Trello Power-Up](https://docs.gitlab.com/ee/integration/trello_power_up.html).\n\nWhen we created GitLab SVG libraries, we wanted to use them for ourselves and\nmake them available to the general public, so whenever we publish a new version,\nwe use GitLab Pages so that it’s fully automated every time.\n\nWith the Trello Power-Up plugin, we use DevOps to address compatibility\nissues when a new version of Trello is released. GitLab Pages makes it easy to\ndeploy a new version, in a fast and diligent manner, so that it’s accessible in\nthe Trello Marketplace as quickly as possible.\n\n## Frontend DevOps and Data-driven efforts\n\nIncorporating frontend DevOps and CI/CD into the workflow has had a significant\nimpact on efficiency and results. We have greater insight into our operations\nand have metrics to help us detect major areas of improvement. We set up\n[Sitespeed](https://www.sitespeed.io) using Kubernetes to analyze sets of pages\nand provide reports on anything that could hamper our users’ perceived\nperformance, from CSS and JavaScript bundle sizes to accessibility issues and\nthe render time differences between various points in time. The information we gathered using\nSitespeed has helped us improve the merge requests page and identify pages that\nrender slowly. Having more data has changed the way we approach problems at\nGitLab, because we are able to focus our efforts on specific areas.\n\n## The unexpected discovery of problems\n\nOne of the unexpected benefits of our workflow is the discovery of problems that\nwe may not have identified.\n\n### A lack of automation\n\nWe realized, for example, that we lack some automation in our tools. For\ninstance, every time we didn’t format code in a specific way, our linter\nnotified us, but analyzing and fixing the code slowed down developer velocity,\nso we decided to add [Prettier](https://prettier.io/) to format our code in our\nmerge requests for us. We also realized that, sometimes, we need a little bit of\nautomation when we publish code. As an all-remote company, many of us work on\npublic WiFi, and we found that unreliable connections could have detrimental\neffects while deploying code. The combination of CI and DevOps made deployments\neasier. If we triggered a pipeline and a coffee shop WiFi goes vamoose, it\ndoesn't matter. We already automated a significant part of our development\nprocess, but we’re always striving for more.\n\n### A lack of speed\n\nIn the case of CI, we noticed that our own tools can be a source of problems. We\nfound that we didn’t make the necessary considerations to keep our test suite fast.\nAs developers, we want to go back to developing as fast as possible. A few of my\nteammates discovered that our test runs were becoming slower and slower with each\nrelease. 
Even though these are not customer-facing changes, they have made both\nproduct managers and team managers consider investing in those issues, because\nthe easier the development cycle is for the developers involved, the better it\nis for our customers, since we can deliver even more features. Furthermore, we\ncan prevent regressions from happening by having solid foundations, such as\ntesting, code style, and code formatting.\n\nEvery time we discover problems that affect us or our work, we realize that we\ncan also jeopardize the features and experiences we want to deliver to our\ncustomers. It has changed the culture inside the team, because we view\nperformance issues as developers rather than as GitLab team-members.\n\n## Advice to frontend teams\n\nUsing DevOps and CI/CD in a frontend workflow is compatible with teams of any\nsize, including small teams that may want to ensure that their code styling is\nthe same.\n\n### Put a linter in place\n\nWith CI, the smallest and perhaps one of the most significant steps is\nto put a linter in place, and if the pipeline doesn't pass, you can’t merge the\ncode (a minimal lint job is sketched below). That's such a simple, effective way to improve your code and to keep it\ntidy and clean in the long run. Just setting up some simple steps using CI will\nimprove your team’s code and your developers’ quality of life so that they don't\nhave to worry about combing through past code. Even though small teams might not\nfind the value in the short term, when they scale, they certainly will.\n\n### Create consistent scenarios\n\nThe bigger the project, the more you realize that some of your tooling ends up\nrunning locally, and it's beneficial to run it on CI. If something doesn't work\non a generic type of machine that has enough dependencies installed to run your\nCI setup, that means there’s something wrong and that you should probably fix it\nbefore merging your code. As long as you can create a consistent scenario in which\nyou can do things like testing and linting, you should be in a good position to\ndeliver a great product.\n\n### Select CI-compatible tools\n\nFor teams of all sizes, it’s important that the tools you select as part of your\nworkflow are compatible with CI in some way, so that even if you had a big part\nof your workflow running locally, you can easily move to CI by creating a pipeline\nthat resembles that of your daily workflow. Regardless of the tool that you choose,\ncreating a job for it will return a lot of value in the long run. If it makes\nsense, I encourage you to add it, because there’s very little incentive not to.\nCI-compatible tools include test runners, linters, Prettier, or any custom-made\ntools that help you in some way. One thing you might want to avoid is relying\non servers that live on CI runners: since runners only run for a limited amount of\ntime, those servers will stop existing. You could also add deployments to your\nCI workflow, helping you with DevOps and preventing you from worrying about\ncomplicated local setups for new developers. The possibilities are huge.\n\n### Add performance testing\n\nTo add to the pool of possibilities, why not add performance testing to your\nmerge requests with a tool such as\n[Lighthouse](https://developers.google.com/web/tools/lighthouse/), which can\nhelp you understand potential performance bottlenecks in your website? Or, maybe\nyour team can add the ability to generate code documentation and publish it via\nGitLab Pages. CI/CD can be a really good tool, because it will return something\nimmediately. 
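To make the linter advice concrete, here is a minimal sketch of such a job; the job name, Node image, and choice of ESLint are illustrative assumptions rather than details from the original post:\n\n```yaml\n# Hypothetical lint job in .gitlab-ci.yml: if ESLint reports a problem, the\n# job fails, the pipeline fails, and the merge request cannot be merged.\nlint:\n  image: node:lts\n  script:\n    - npm ci\n    - npx eslint .\n```\n\nBecause the job is just a pipeline step, the same pattern works for Prettier, test runners, or any other CI-compatible tool. 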
It's just a matter of how you want to use it, depending on your needs.\n\nThe more the frontend team uses CI and DevOps, the more we discover ways to use\nit, so it’s worth it to us to invest in this tool.\n\nSometimes, we just want to\nget stuff out there without too much consideration for tooling and CI and CD,\nbut because of the benefits we’ve experienced, we now include CI/CD in all of\nour projects. With GitLab, everything is integrated, so why skip it? Instead of\nfighting against automation, I encourage teams to embrace the idea that CI is\nthere to help you.\n\n[Cover image](https://unsplash.com/photos/UbGqwmzQqZM) by\n[Zhipeng Ya](https://unsplash.com/photos/UbGqwmzQqZM?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText), licensed\nunder [CC X](https://unsplash.com/license).\n{: .note}\n",[1979,727,832,894],{"slug":7194,"featured":6,"template":678},"how-devops-and-gitlab-cicd-enhance-a-frontend-workflow","content:en-us:blog:how-devops-and-gitlab-cicd-enhance-a-frontend-workflow.yml","How Devops And Gitlab Cicd Enhance A Frontend Workflow","en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow.yml","en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow",{"_path":7200,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7201,"content":7207,"config":7211,"_id":7213,"_type":16,"title":7214,"_source":17,"_file":7215,"_stem":7216,"_extension":20},"/en-us/blog/why-you-should-join-the-gitlab-security-team",{"title":7202,"description":7203,"ogTitle":7202,"ogDescription":7203,"noIndex":6,"ogImage":7204,"ogUrl":7205,"ogSiteName":692,"ogType":693,"canonicalUrls":7205,"schema":7206},"Why you should join the GitLab security team","Meet Director of Security Kathy Wang for a look inside our remote (and growing!) security team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668486/Blog/Hero%20Images/why-you-should-join-the-gitlab-security-team.jpg","https://about.gitlab.com/blog/why-you-should-join-the-gitlab-security-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why you should join the GitLab security team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2018-08-09\",\n      }",{"title":7202,"description":7203,"authors":7208,"heroImage":7204,"date":7190,"body":7209,"category":14,"tags":7210},[6728],"\nOur security team is small but mighty and looking to\n[add new team members](/jobs/)! I sat down with Director\nof Security [Kathy Wang](/company/team/#wangkathy), who built\nthe team from scratch. See our conversation below and get to know\nthe [rest of our security team](/company/team/), and read about\ntheir work in the [security handbook](/handbook/security/).\n\n#### Can you tell us a little about your previous experience and what you do at GitLab?\n\nI’m a career security practitioner and have been focusing on security for\nnearly 20 years. At GitLab, my team is responsible for securing all GitLab\nproducts and services, including the GitLab.com infrastructure.\n\n#### If I remember correctly, you started the security team from scratch – how has that experience been, and what are you excited about?\n\nIt is always exciting to build a security team from initial stages to maturity!\nI was the de facto CISO at a tech firm two jobs ago, where I built a security\nteam as well, and each time, it is a different experience. GitLab is a very\nunique company. 
I have never encountered a company quite as transparent as GitLab,\nand 100 percent of GitLab’s employees are remote. That presents its own set of\nsecurity challenges, but these are exciting challenges that my team is\nwell equipped to handle.\n\n#### In the grand scheme of things, security is a pretty new field, so what are some ways that people can get into security?\n\nSeasoned security professionals are in high demand, and there’s never been a\nbetter time to get into security! I’ve mentored a number of people looking to\nbecome security practitioners, and one of my first suggestions is to start\nattending local security meetups and events. Getting to know other security\npractitioners in your area and listening to their briefings will help you\nunderstand what types of problems security practitioners solve. Through\nnetworking at these events, you’ll discover who is hiring locally and at\nwhat level of expertise.\n\nWhen I was starting out in my security career, I attended local security events\nand meetups, and through those events, I met a number of open source developers.\nIt was fun to learn from them and contribute to those projects. In turn, their\nwork inspired me to start a couple of open source projects myself. As a result,\nI discovered that I’m pretty good at assessing gaps in current security capabilities\nand figuring out how to bridge those gaps – but not always in an obvious way.\nTo me, that’s one of the most exciting aspects of this role.\n\n#### What are some transferable skills that you see as good preparation for a role in security?\n\nSecurity practitioners come from quite a varied set of backgrounds. I’ve learned\nthrough working with many people over the years that critical thinking and\nproblem-solving skills are the most transferable. Information security is an\narms race, and continually thinking creatively to minimize security risks is\ntantamount to a successful security career.\n\n#### Tell us about the current security team – how big is it, and what are they currently working on?\n\nWe have a creative and talented security team at GitLab! Our standards are high,\nand we work hard here because we take securing our customers’ data very seriously.\nCurrently, we are a small team and will scale as the company grows. Our Security Vision,\nour hiring plan, and what our security team is focused on are outlined in\nour [security handbook](/handbook/security/#security-vision).\n\n#### Is the team able to make use of our new Security Dashboards feature and consult on improving the feature going forward?\n\nI’ve always believed that our security teams should regularly contribute to our\nproducts and services. At GitLab, the security team is at the forefront of\nproviding that expertise and experience to developers, because we are in the\nbest position to understand what security-minded customers would find actionable\nin security features.\n\nFor example, I recently built my own set of prototype security dashboards, so\nthat I could explain to engineering and marketing teams what \"actionable metrics\"\nmean to security professionals. The security team briefs all of GitLab on a\nbiweekly basis, and those metrics are used to demonstrate progress. You can build\nall the security dashboards and features that you want, but in the end, what\ncan we do with the data to raise the bar in security? 
After presenting these\nmetrics, I love that from top-down, everyone agreed to bake these improvements\ninto [GitLab’s future product roadmap](/direction/), so that our customers ultimately benefit as well.\n\n#### We have a ton of [security openings](/jobs/) right now – can you share a bit more about the security team's focus and scope moving forward, and what new team members can expect when they join?\n\nSince we are a small security team, we plan to grow the team to scale to the\ngrowth of the rest of GitLab, so that remains our focus for the foreseeable future.\nAs an all-remote company, we all work in different locations, and new team members\nshould expect to collaborate across teams and departments – not just within the\nsecurity team. It’s ironic, but when working entirely remotely, it’s even more\nimportant to over-communicate with everyone in order to obtain and deliver results.\n\n#### Do you have any tips for people applying to security roles at GitLab?\n\nWe are very interested in anyone passionate about security. It’s even better if\nyou have contributed to open source projects. We want to know that you will bring\nwith you a resolve to be constructive when working with others. At GitLab, everyone\ncontributes to making our firm secure; as such, our security team continually\neducates and guides our staff on secure practices in order to mitigate evolving threats.\n\n#### Anything else to share with folks interested in security and GitLab?\n\nAt GitLab, we value all contributions made by our staff in an open and transparent\nmanner. Our security team continues to make positive, measurable impact across\nthe company that can be easily translated to long-term customer value. We enjoy\nhealthy, transparent debates about secure practices, while quickly implementing\neffective solutions that empower us to make better data-driven decisions, long term.\n\nCover image by Felix Russell-Saw on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1307],{"slug":7212,"featured":6,"template":678},"why-you-should-join-the-gitlab-security-team","content:en-us:blog:why-you-should-join-the-gitlab-security-team.yml","Why You Should Join The Gitlab Security Team","en-us/blog/why-you-should-join-the-gitlab-security-team.yml","en-us/blog/why-you-should-join-the-gitlab-security-team",{"_path":7218,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7219,"content":7225,"config":7231,"_id":7233,"_type":16,"title":7234,"_source":17,"_file":7235,"_stem":7236,"_extension":20},"/en-us/blog/git-happens",{"title":7220,"description":7221,"ogTitle":7220,"ogDescription":7221,"noIndex":6,"ogImage":7222,"ogUrl":7223,"ogSiteName":692,"ogType":693,"canonicalUrls":7223,"schema":7224},"Git happens! 6 Common Git mistakes and how to fix them","Whether you added the wrong file, committed directly to master, or some other mishap, we've got you covered.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678743/Blog/Hero%20Images/fix-common-git-mistakes.jpg","https://about.gitlab.com/blog/git-happens","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git happens! 
6 Common Git mistakes and how to fix them\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Beckham\"}],\n        \"datePublished\": \"2018-08-08\",\n      }",{"title":7220,"description":7221,"authors":7226,"heroImage":7222,"date":7228,"body":7229,"category":14,"tags":7230},[7227],"Sam Beckham","2018-08-08","\nWe all make mistakes, especially when working with something as complex as Git. But remember, Git happens!\n\n## What is Git?\n\nGit is free and open-source software for distributed code management and version control. It is distributed under the GNU General Public License Version 2. Git tracks changes in any set of files and is usually used for coordinating work among programmers collaboratively developing source code during software development. \n\nGit was created and released in 2005 by Linus Torvalds, who also developed Linux. The impetus for Git (which is an altering of the word “get”) was to generate an open-source version control system that performed better for the requirements of Linux kernel development. Available open-source systems at the time were not able to meet the [large-scale collaborative performance effort](https://www.techtarget.com/searchitoperations/definition/Git) required.\n\n## Benefits of using Git\n\nBesides delivering superior performance, Git also provides support for a distributed workflow and safeguards against corruption. There are several other benefits, such as:\n\n- superior performance when it comes to version control systems\n- the ability for simultaneous development because everyone has their own local copy of code and can work on it in tandem. \n- faster releases\n- security\n- flexibility\n- built-in integration \n- strong community support\n\nIf you're brand-new to Git, you can learn [how to start using Git on the command line](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html). Here's how we can fix six of the most common Git mistakes.\n\n## 1. Oops... I spelled that last commit message wrong\n\nAfter a good few hours of [coding](/solutions/source-code-management/), it's easy for a spelling error to sneak into your commit messages.\nLuckily, there's a simple fix.\n\n```bash\ngit commit --amend\n```\n\nThis will open up your editor and allow you to make a change to that last commit message.\nNo one needs to know you spelled, \"addded\" with three \"d\"s.\n\n## 2. Oops... I forgot to add a file to that last commit\n\nAnother common Git pitfall is committing too early. You missed a file, forgot to save it, or\nneed to make a minor change for the last commit to make sense. `--amend` is your friend\nonce again.\n\nAdd that missed file then run that trusty command.\n\n```bash\ngit add missed-file.txt\ngit commit --amend\n```\n\nAt this point, you can either amend the commit message or just save it to keep it the same.\n\n## 3. Oops... I added a file I didn't want in the repo\n\nBut what if you do the exact opposite? 
What if you added a file that you didn't want to commit?\nA rogue ENV file, a build directory, a picture of your cat that you accidentally saved to the wrong folder?\nIt's all fixable.\n\nIf all you did was stage the file and you haven't committed it yet, it's as simple as resetting that staged file:\n\n```bash\ngit reset /assets/img/misty-and-pepper.jpg\n```\n\nIf you've gone as far as committing that change, you need to run a few extra steps first:\n\n```bash\ngit reset --soft HEAD~1\ngit reset /assets/img/misty-and-pepper.jpg\nrm /assets/img/misty-and-pepper.jpg\ngit commit\n```\n\nThis will undo the commit, remove the image, then add a new commit in its place.\n\n## 4. Oops... I committed all those changes to the master branch\n\nSo you're working on a new feature and in your haste, you forgot to open a new branch for it.\nYou've already committed a load of files and now those commits are all sitting on the master branch.\nLuckily, [GitLab can prevent you from pushing directly to master](/blog/keeping-your-code-protected/).\nSo we can roll back all these changes to a new branch with the following three commands:\n\n*Note: Make sure you commit or stash your changes first, or all will be lost!*\n\n```bash\ngit branch future-brunch\ngit reset HEAD~ --hard\ngit checkout future-brunch\n```\n\nThis creates a new branch, then rolls back the master branch to where it was before you made\nchanges, before finally checking out your new branch with all your previous changes intact.\n\n## 5. Oops... I made a spelling mistake in my branch name\n\nThe keen-eyed among you will notice a slight spelling error in my last example. It's almost\n3:00 PM and I haven't had lunch yet, so in my hunger, I've named our new branch `future-brunch`.\nDelicious.\n\nWe rename this branch in a similar way to how we rename a file with the `mv` command: by\nmoving it to a new location with the correct name.\n\n```bash\ngit branch -m future-brunch feature-branch\n```\n\nIf you've already pushed this branch, there are a couple of extra steps required. We need to\ndelete the old branch from the remote and push up the new one:\n\n```bash\ngit push origin --delete future-brunch\ngit push origin feature-branch\n```\n\n## 6. Oops... I did it again\n\nThis command is for when everything has gone wrong. 
When you've copy-pasted one too\nmany solutions from Stack Overflow and your repo is in a worse state than it was when you started.\nWe've all been there.\n\n`git reflog` shows you a list of all the things you've done.\nIt then allows you to use Git's magical time-traveling skills to go back to any point in the past.\nI should note, this is a last resort and should not be used lightly.\nTo get this list, type:\n\n```bash\ngit reflog\n```\n\nEvery step we took, every move we made, Git was watching us.\nRunning that on our project gives us this:\n\n```bash\n3ff8691 (HEAD -> feature-branch) HEAD@{0}: Branch: renamed refs/heads/future-brunch to refs/heads/feature-branch\n3ff8691 (HEAD -> feature-branch) HEAD@{2}: checkout: moving from master to future-brunch\n2b7e508 (master) HEAD@{3}: reset: moving to HEAD~\n3ff8691 (HEAD -> feature-branch) HEAD@{4}: commit: Adds the client logo\n2b7e508 (master) HEAD@{5}: reset: moving to HEAD~1\n37a632d HEAD@{6}: commit: Adds the client logo to the project\n2b7e508 (master) HEAD@{7}: reset: moving to HEAD\n2b7e508 (master) HEAD@{8}: commit (amend): Added contributing info to the site\ndfa27a2 HEAD@{9}: reset: moving to HEAD\ndfa27a2 HEAD@{10}: commit (amend): Added contributing info to the site\n700d0b5 HEAD@{11}: commit: Addded contributing info to the site\nefba795 HEAD@{12}: commit (initial): Initial commit\n```\n\nTake note of the `HEAD@{n}` entries, as these are the indexes; the left-most column holds the commit SHAs.\nIf you want to go back to any point in the history, run the below command, replacing `index` with the number from the matching `HEAD@{...}` entry; you can also reset directly to one of the SHAs, e.g. `dfa27a2`.\n\n```bash\ngit reset HEAD@{index}\n```\n\nSo there you have six ways to get out of the most common Gitfalls.\n\n## More common Git problems\n\nThere are a number of tips for fixing common Git problems. For starters, here are a couple of common ones: to indicate the end of command options for command line utilities, try using the double dash (--). If you want to undo a change, use `git reset`.\n\n- If you have a commit that is only in your local repository, you can amend it with the `git commit --amend` command.\n- Sometimes, you might find yourself adding files that you didn’t mean to commit. `git rm` will remove a file from both your staging area and your file system. However, if that’s not the solution you were looking for, make sure you only remove the staged version and add the file to your .gitignore so you don’t make the same mistake again.\n- To fix a typo in a commit message or to add a file, use `git commit --amend`.\n- If you want to remove files from staging before committing, use [`git restore`](https://medium.com/@basitalkaff/common-git-problems-and-how-to-fix-them-878ef750a015) to reset the pointer back to the last commit ID.\n- If you have a change of heart and want to remove changes from a commit before pushing and reverting back, use `git reset \u003Cspecific-commit-id>`.\n- Faulty commits sometimes make their way into the central repository. When that happens, instead of creating additional revert commits, just apply the necessary changes and use the `--no-commit`/`-n` option.\n- Instead of having to reinvent the wheel, use the reuse recorded resolution (rerere) feature to fix repetitive merge conflicts. Add `git config --global rerere.enabled true` to your global config to enable it for all projects, or, if you prefer, manually create the `.git/rr-cache` directory to enable it for each project.\n\n## How to prevent problems with your Git repository\n\nIt’s important to consider Git repository security for web projects. 
Why? When you deploy a [web page from a Git repository](https://www.techtarget.com/searchsecurity/answer/How-can-developers-avoid-a-Git-repository-security-risk), you could also make the .git directory and its contents accessible. This gives an attacker the ability to access the metadata from URLs such as https://example.org/.git/config.\n\nIf a Git repository is checked out using HTTP authentication where the username and password to access the repository are incorporated as part of the URL, that can create an especially unsafe situation. Because this information is stored in the .git/config file, an attacker has direct access to credentials for the repository.\n\nTo avoid these risks and improve the security of a Git repository, developers should refrain from using direct Git checkouts on web deployments. Instead, they should copy files to the web root directory without the .git directory metadata. Alternatively, access to the .git directory can be blocked in the server configuration. It's also a good idea to avoid storing passwords and secret tokens right in repositories.\n\nSome suggestions to [stop Git repositories from getting too big](https://stackoverflow.com/questions/58679210/how-to-stop-git-repositories-from-getting-too-big): avoid cluttering the repository with large numbers of files, don’t include binary or office files that produce huge diffs, and from time to time, run `git reflog expire --all --expire=now` followed by `git gc --prune=now --aggressive`.\n\nHere is an approach for [fixing a corrupted Git repository](https://stackoverflow.com/questions/18678853/how-can-i-fix-a-corrupted-git-repository).\n\n## Some common Git commands\n\nThere are hundreds of Git commands programmers can use to change and track projects. Some of the [more common ones](https://shortcut.com/blog/common-git-commands-that-you-should-memorize#:~:text=13%20common%20Git%20commands%20that%20you%20should%20consider,compare%20unstaged%20files%20before%20committing%20...%20More%20items) are:\n\n**Create a new repository for storing code/making changes:**\n\nA new project requires a repository where your code is stored and changes can be made.\nCommand:\n\n`git init`\n\nOr change a current directory into a Git repo using:\n\n`git init \u003Cdirectory>`\n\n**Configure local and global values:**\n\nCommand:\n\n`git config --global user.email \u003Cyour-email>` or `git config -`\n\n**Use cloning to get source code from your remote repo:**\n\nWhen working on an existing project, you can use the clone command to create a copy of your remote repo from GitLab on your local machine, so you can make changes to it without compromising the master version.\n\nTo download your project, use this:\n\n`git clone \u003Crepo URL>`\n\n**Create a local workspace:**\n\nWhen collaborating with other developers on a project, using branches lets you modify and reference copies of the same portions of source code and merge them at a later point. 
This avoids a situation where developers are making changes to the same code at the same time, creating errors and broken code/features.\n\n[To create a new local branch](https://shortcut.com/blog/common-git-commands-that-you-should-memorize#:~:text=13%20common%20Git%20commands%20that%20you%20should%20consider,compare%20unstaged%20files%20before%20committing%20...%20More%20items):\n\n```bash\ngit branch \u003Cbranch-name>\n```\n\nPush this local branch to the remote repo with the following:\n\n```bash\ngit push -u \u003Cremote> \u003Cbranch-name>\n```\n\nView existing branches with the following (add `-r` to list branches on the remote repo):\n\n```bash\ngit branch --list\n```\n\nAnd delete a branch with:\n\n```bash\ngit branch -d \u003Cbranch-name>\n```\n\n**Switch branches, inspect files and commits:**\n\nWith git checkout, you can move between the master branch and your local copies, and it can be used to inspect the file and [commit history](/blog/keeping-git-commit-history-clean/). You start out on the local clone of your master branch by default, and you'll need to run the command below to switch to a different local branch before making changes there. One thing to note: make sure that you commit or stash any in-progress changes before switching; otherwise, you could encounter errors.\n\nCommand:\n\n```bash\ngit checkout \u003Cname-of-your-branch>\n```\n\nOr create a new branch and switch to it with one command:\n\n```bash\ngit checkout -b \u003Cname-of-your-branch>\n```
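And if you are mid-change but not ready to commit, `git stash` shelves your work so you can switch branches safely. A minimal sketch (the branch names are placeholders):\n\n```bash\n# Shelve uncommitted changes, switch away, then come back and restore them\ngit stash\ngit checkout \u003Cother-branch>\n# ... do some work, then return\ngit checkout \u003Cname-of-your-branch>\ngit stash pop\n```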
Have some Git tips of your own? Let us know in the comments below; we'd love to hear them.\n\nPhoto by [Pawel Janiak](https://unsplash.com/photos/WtRuYJ2EPMA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/mistake?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[702],{"slug":7232,"featured":6,"template":678},"git-happens","content:en-us:blog:git-happens.yml","Git Happens","en-us/blog/git-happens.yml","en-us/blog/git-happens",{"_path":7238,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7239,"content":7245,"config":7250,"_id":7252,"_type":16,"title":7253,"_source":17,"_file":7254,"_stem":7255,"_extension":20},"/en-us/blog/understanding-kubernestes-rbac",{"title":7240,"description":7241,"ogTitle":7240,"ogDescription":7241,"noIndex":6,"ogImage":7242,"ogUrl":7243,"ogSiteName":692,"ogType":693,"canonicalUrls":7243,"schema":7244},"What you need to know about Kubernetes RBAC","Role-based access control is now default, and expected in most Kubernetes deployments. Here's the What, Why and How of RBAC.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678884/Blog/Hero%20Images/understanding-kubernetes-rbac-post-cover.jpg","https://about.gitlab.com/blog/understanding-kubernestes-rbac","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What you need to know about Kubernetes RBAC\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2018-08-07\",\n      }",{"title":7240,"description":7241,"authors":7246,"heroImage":7242,"date":7247,"body":7248,"category":14,"tags":7249},[1161],"2018-08-07","\nManaging access to resources is an essential part of ensuring the reliability, security, and efficiency of any infrastructure, but it can quickly become complicated to manage. With Kubernetes, attribute-based access control (ABAC) is very powerful but complex, while role-based access control (RBAC) makes it easier to manage permissions using kubectl and the Kubernetes API directly. This post shares how to get started with RBAC and some best practices to adopt.\n\n## RBAC vs ABAC\n\nRBAC reached beta [with Kubernetes 1.6](https://kubernetes.io/blog/2017/04/rbac-support-in-kubernetes/) and general availability [with 1.8](https://kubernetes.io/blog/2017/10/using-rbac-generally-available-18/). A fundamental building block of Kubernetes, RBAC is an authorization mechanism for controlling how the Kubernetes API is accessed using permissions.\n\nRBAC is now preferred over ABAC, which is difficult to manage and understand. ABAC also requires SSH and root access to make authorization policy changes.\n\nWith RBAC, resource management can be delegated without giving away SSH access to the Cluster Master VM, and permission policies can be configured using kubectl or the Kubernetes API itself.\n\n## RBAC resources\n\nWith RBAC, authorization is granted through sets of permissions that can be scoped to a single namespace or to the entire cluster. A set of permissions defined within a namespace is called a Role; its cluster-wide counterpart is called a ClusterRole.\n\nBelow, you can see an example of a role definition:\n\n### Role\n\n```yaml\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  namespace: default\n  name: pod-reader\nrules:\n- apiGroups: [\"\"] # \"\" indicates the core API group\n  resources: [\"pods\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n```\n\nLike other Kubernetes resources, a role definition contains kind, apiVersion, and metadata, but with the addition of rules.\n\nThe rules key defines how the permissions work: you specify which resources within one or more apiGroups are permitted and how they can be accessed, using verbs such as `create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, and `watch`. The apiGroups key names the location in the API where the resources are found; an empty string in this list means the core API group.\n\n### ClusterRole\n\n```yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  # \"namespace\" omitted since ClusterRoles are not namespaced\n  name: secret-reader\nrules:\n- apiGroups: [\"\"]\n  resources: [\"secrets\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n```\n\nThe major difference in the definition of a `ClusterRole` is the absence of a namespace, because the permissions it defines are cluster-scoped. However, when referenced by a `RoleBinding`, a `ClusterRole` can be used to grant its permissions to namespaced resources within the `RoleBinding`'s namespace.\n\n### RoleBinding and ClusterRoleBinding\n\nA RoleBinding associates a Role with a user or list of users, granting them that Role's permissions. The user(s) are defined under subjects, and the Role association under the role reference (roleRef).
For example:\n\n#### RoleBinding:\n\n```yaml\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: read-pods\n  namespace: default\nsubjects:\n- kind: User\n  name: abu\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: Role\n  name: pod-reader\n  apiGroup: rbac.authorization.k8s.io\n```\n\n#### ClusterRoleBinding:\n\n```yaml\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: read-secrets-global\nsubjects:\n- kind: Group\n  name: manager\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: ClusterRole\n  name: secret-reader\n  apiGroup: rbac.authorization.k8s.io\n```
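To see these definitions in action, you can load them with kubectl and then check what the bound subject is allowed to do. A short sketch, assuming the Role and RoleBinding above were saved as `role.yaml` and `role-binding.yaml` (the file names are our own, not from the definitions):\n\n```bash\n# Create the Role and its binding\nkubectl apply -f role.yaml\nkubectl apply -f role-binding.yaml\n\n# Impersonate the bound user and verify the granted permissions\nkubectl auth can-i list pods --namespace default --as abu    # answers \"yes\"\nkubectl auth can-i delete pods --namespace default --as abu  # answers \"no\": the Role only allows get, watch, and list\n```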
## Best practices\n\nApplying the principle of [least privilege](https://medium.com/@haim_50405/establish-least-privileged-best-practice-for-your-kubernetes-clusters-f0785e1aee39) is crucial, as it reduces exposure and vulnerability. A few of the essential best practices include:\n\n- Be specific about the resources you grant access to and the verbs you allow; avoid wildcards\n- Use Roles instead of ClusterRoles where possible\n- Give only the permissions required for the specific tasks a user performs, and nothing more\n- Create and use service accounts for processes and services like [Tiller](https://docs.helm.sh/rbac#tiller-and-role-based-access-control) that require permissions, instead of using the default service accounts\n\n## GitLab + RBAC\n\nCurrently, integrating GitLab with a Kubernetes cluster that has RBAC enabled is not supported. You will need to enable and use the legacy ABAC mechanism ([see the documentation here](https://docs.gitlab.com/ee/user/project/clusters/index.html#security-implications)). RBAC will be supported in [a future release](https://gitlab.com/gitlab-org/gitlab-ce/issues/29398). This affects GitLab.com and all self-managed versions of GitLab.\n\n## Learn more\n\n- [Controlling access](https://kubernetes.io/docs/reference/access-authn-authz/controlling-access/)\n- [Authorization](https://kubernetes.io/docs/reference/access-authn-authz/authorization/)\n- [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)\n- [RBAC and TLS certificates](https://sysdig.com/blog/kubernetes-security-rbac-tls/)\n",[1002,873],{"slug":7251,"featured":6,"template":678},"understanding-kubernestes-rbac","content:en-us:blog:understanding-kubernestes-rbac.yml","Understanding Kubernestes Rbac","en-us/blog/understanding-kubernestes-rbac.yml","en-us/blog/understanding-kubernestes-rbac",{"_path":7257,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7258,"content":7264,"config":7269,"_id":7271,"_type":16,"title":7272,"_source":17,"_file":7273,"_stem":7274,"_extension":20},"/en-us/blog/using-the-gitlab-ci-slash-cd-for-smart-home-configuration-management",{"title":7259,"description":7260,"ogTitle":7259,"ogDescription":7260,"noIndex":6,"ogImage":7261,"ogUrl":7262,"ogSiteName":692,"ogType":693,"canonicalUrls":7262,"schema":7263},"How to simplify your smart home configuration with GitLab CI/CD","How to use GitLab pipelines to automatically test and deploy new home-assistant configurations, wherever you are.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678717/Blog/Hero%20Images/ci-smart-home-configuration.jpg","https://about.gitlab.com/blog/using-the-gitlab-ci-slash-cd-for-smart-home-configuration-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to simplify your smart home configuration with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mario de la Ossa\"}],\n        \"datePublished\": \"2018-08-02\",\n      }",{"title":7259,"description":7260,"authors":7265,"heroImage":7261,"date":7266,"body":7267,"category":14,"tags":7268},[6454],"2018-08-02","\nSo you've read all about the [Internet of Things](https://en.wikipedia.org/wiki/Internet_of_things) and all the cool stuff you can do with it – from setting up timers for your lights to [making your breakfast](/blog/introducing-auto-breakfast-from-gitlab/) – and now you're itching to get started? Great!\n\nIf you're a power user, you've probably settled on using [Home Assistant](https://www.home-assistant.io/) as your smart home hub, but this choice has a few pitfalls:\n\n- It's annoying to SSH into the server itself to change configuration. Wouldn't you like to use your favorite local editor instead?\n- How do you keep your configuration backed up?\n- How do you protect yourself from accidentally messing up the configuration?\n\nIn this guide we'll show you how to fix these annoyances yourself, thanks to Git and the power of [GitLab Pipelines](https://docs.gitlab.com/ee/ci/pipelines/index.html)! We will set up a pipeline that checks your Home Assistant configuration and deploys it to your Home Assistant install, giving you the power to deploy changes from anywhere in the world with a simple `git push`!\nDid you go on vacation and forget you wanted your lights to [turn on and off randomly to make it seem like someone's home](https://community.home-assistant.io/t/set-random-time-for-random-automatic-turning-off-lights-mimic-someone-is-home/3524)? No worries! 
Just open GitLab's [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) and make your changes from your hotel room.\n\nBy the end of this tutorial you'll have:\n\n- Automatic configuration backups thanks to `git`. You'll be able to see the history of every change you've made and revert changes easily.\n- Automatic configuration testing via GitLab pipelines. Never again will a simple typo have you scratching your head, wondering why things don't work!\n- An easy way to push changes to your Home Assistant configuration without having to SSH into the server.\n\n## Requirements\n\nIn this guide we'll be assuming a few things:\n\n- You installed Home Assistant using the Docker image\n- The server Home Assistant runs on is accessible from the internet via SSH (or you're using a self-managed GitLab installation in the same network)\n\n## Set up your server\n\n1.   Navigate to your Home Assistant configuration folder.\n1.   Create a new file called `.gitignore` with the following content:\n\n     ```\n     *.db\n     *.log\n     ```\n\n1.   Initialize the Git repo\n\n     ```bash\n     git init\n     git add .\n     git commit -m 'Initial commit'\n     ```\n1.   [Create a new GitLab project](https://gitlab.com/projects/new) and push to it\n\n     ```bash\n     git remote add origin YOUR_PROJECT_HERE\n     git push -u origin master\n     ```\n\nWith this, you now have a backup of your Home Assistant configuration. Let's now set up the GitLab pipeline!\n\n## Setting up the pipeline\n\nWe have a few goals for the [CI/CD pipeline](/topics/ci-cd/):\n- Test the new configuration to ensure it's valid\n- Deploy the new configuration to the Home Assistant server\n- Bonus: Notify us of a successful deployment, since the default is to only notify for failures\n\n[The complete `.gitlab-ci.yml` can be found here.](https://gitlab.com/mdelaossa/hass-via-cicd/blob/master/.gitlab-ci.yml)\n{: .note}\n[General documentation for how to configure jobs can be found here.](https://docs.gitlab.com/ee/ci/yaml/)\n{: .note}\n\nWe will be using the following stages in our pipeline:\n- test: Will test the Home Assistant configuration to ensure it is valid\n- deploy: Will update the Home Assistant configuration on the server and restart Home Assistant\n- notify: Will send a push notification with success/failure state\n\nSince these aren't default pipeline stages, we need to declare them in our `.gitlab-ci.yml` like so:\n\n```yaml\nstages:\n  - test\n  - deploy\n  - notify\n```\n\n### Automating configuration testing\n\nSince GitLab CI/CD [supports Docker images](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html) and Home Assistant is available as a Docker image, this is a fairly straightforward stage to add.\n\nAdd this to your `.gitlab-ci.yml` file:\n\n```yaml\ntest:\n  stage: test\n  image: homeassistant/amd64-homeassistant\n  script:\n    - hass --script check_config -c .\n```\n\nWith this, we create a job called `test` that runs in the `test` stage. We're using the `homeassistant/amd64-homeassistant` image because it exposes the `hass` command globally, so we can use the built-in configuration checking command on our committed files. 
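You can also try the same check locally before pushing. A minimal sketch, assuming Docker is installed and that mounting your configuration directory at `/config` works for this image (the mount point is our assumption, not from the original setup):\n\n```bash\n# Run the same configuration check against the current directory\ndocker run --rm -v \"$(pwd):/config\" homeassistant/amd64-homeassistant \\\n  hass --script check_config -c /config\n```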
That's it for the test stage!\n\nFeel free to commit and push this change to test it out!\n\n```bash\ngit add .\ngit commit -m 'Added testing stage to GitLab pipeline'\ngit push\n```\n\nYou'll now see that a pipeline gets created whenever you push:\n\n![HASS Test pipeline success](https://about.gitlab.com/images/blogimages/hass-cicd/pipeline-pass-1.png){: .shadow.center.large}\n\nIf your configuration contains any errors, they'll be shown in the `Failed Jobs` view of the pipeline and you'll get an email notifying you of the failure:\n\n![HASS Test pipeline failure](https://about.gitlab.com/images/blogimages/hass-cicd/pipeline-fail-1.png){: .shadow.center.large}\n\n### Automating deployments\n\nNow that we have automated testing, let's add another stage that will deploy our new configuration if the tests pass!\n\n\"Deploying\" in this case will consist of:\n- SSHing into the server\n- Doing a `git pull` to pull down changes from the repo\n- Restarting the Home Assistant Docker container\n\n#### Preparing the server (and GitLab) for SSH access\n\nSince we will be using SSH, we need to prepare our server first. We'll follow [these instructions from the GitLab documentation](https://docs.gitlab.com/ee/ci/ssh_keys/).\nWe will also set some [CI/CD Variables](https://gitlab.com/help/ci/variables/README#variables).\n\n1.   Generate a new SSH key pair. It's OK to save them to the current folder as you'll delete them later anyway.\n\n     ```bash\n     ssh-keygen -t rsa -C \"hass-deploy\" -b 4096\n     ```\n\n1.   On the server that runs Home Assistant, save the contents of the public key (the file ending in `.pub`) to `/home/user_running_hass/.ssh/authorized_keys`\n1.   Go to your GitLab project's CI/CD variables (inside Settings). Add the contents of the private key file to a variable named `SSH_PRIVATE_KEY`. You can now delete the SSH key pair files if you'd like, or store them somewhere safe.\n\nWe also need to add our server's host keys to the GitLab runner so the runner will be able to SSH successfully. Alternatively we could disable host key checking, but this is not recommended.\n\n1.   On your server, run `ssh-keyscan example.com` where example.com is the domain or IP of your server.\n1.   Create a new CI/CD variable called `SSH_KNOWN_HOSTS` and add the output of `ssh-keyscan` to it.\n\nYou should also create two optional CI/CD variables, used in the deploy job and in the sanity check below:\n- `DEPLOY_USER`: the user running HASS that the runner will SSH into the server as to perform the deploy\n- `DEPLOY_HOST`: the domain or IP of the server
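Before wiring this into the pipeline, it's worth confirming the key works. A quick sanity check from any machine holding the private key; the key path below assumes you told `ssh-keygen` to save the pair as `./hass-deploy`, and the user and host are the values you would put in `DEPLOY_USER`/`DEPLOY_HOST`:\n\n```bash\n# Should print \"ok\" without asking for a password\nssh -i ./hass-deploy \u003Cdeploy-user>@\u003Cdeploy-host> 'echo ok'\n```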
#### The deploy stage\n\nNow that we have prepared our server and GitLab CI/CD variables, we can add our deploy stage to `.gitlab-ci.yml`. Please note that we are using the `only:` keyword so that only new commits on the `master` branch will attempt a deploy.\n\n```yaml\ndeploy:\n  stage: deploy\n  only:\n    - master\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n    - mkdir -p ~/.ssh\n    - chmod 700 ~/.ssh\n    - echo \"$SSH_KNOWN_HOSTS\" > ~/.ssh/known_hosts\n    - chmod 644 ~/.ssh/known_hosts\n  script:\n    - ssh $DEPLOY_USER@$DEPLOY_HOST \"cd '$DEPLOY_PATH'; git pull; docker restart home-assistant\"\n```\n\nThe `before_script` above is in charge of:\n- Making sure `ssh-agent` is installed, and installing it otherwise\n- Making sure `ssh-agent` is running\n- Adding the `SSH_PRIVATE_KEY` to the keys used when logging into a server\n- Creating the `.ssh` folder with the required permissions\n- Adding the values from the `SSH_KNOWN_HOSTS` variable to the proper location\n\nThe `script` portion is what actually deploys our new configuration:\n- We `cd` into the proper location (where the Home Assistant configuration files are kept)\n- We update the configuration with a `git pull`, since this directory is a Git repo\n- We restart Home Assistant (here the container is named `home-assistant`; use the name of your own container)\n\nNote: If you did not create the `DEPLOY_USER` and `DEPLOY_HOST` variables on GitLab, replace them (and `$DEPLOY_PATH`) with the proper values in the script.\n{: .note}\n\nNow let's commit and push this new stage to GitLab!\n```bash\ngit add .\ngit commit -m 'Added deploy stage to GitLab pipeline'\ngit push\n```\n\nWith this new stage added, you can now edit your configuration from anywhere (including the GitLab Web IDE!) and be confident that the changes will be pushed to your Home Assistant server if there are no issues with the configuration.\nThere's no longer a need to figure out how to connect directly to your Home Assistant server to make the edits you need.\n\n### Bonus: Successful deployment notifications\n\nYou'll notice that if the configuration is wrong or an error occurs during the deployment, you will get an email notification, but what about when everything runs successfully?\n\nWe have two options:\n\n1. Enable the `Pipeline Emails` integration and set it to notify on every pipeline\n2. Add a new stage called `notify` and use it to send push notifications to your phone\n\nWhile email is really nice, there's something really satisfying about getting a push notification for your services, so let's set things up using [Pushover](https://pushover.net/).\nYou'll need to create an 'Application' and add the token you get to a GitLab variable called `PUSHOVER_API_TOKEN`. 
You'll also need to add your user key to a variable called `PUSHOVER_USER_TOKEN`.\n\nSince we'd like a different notification depending on whether our pipeline passed or failed, we will be adding two jobs to the `notify` stage:\n\n```yaml\nnotify_success:\n  stage: notify\n  allow_failure: true\n  only:\n    - master\n  script:\n    - curl -s --form-string \"token=$PUSHOVER_API_TOKEN\" --form-string \"user=$PUSHOVER_USER_TOKEN\" --form-string \"message=New Hass config deployed successfully!\" https://api.pushover.net/1/messages.json\n\nnotify_fail:\n  stage: notify\n  allow_failure: true\n  only:\n    - master\n  when: on_failure\n  script:\n    - curl -s --form-string \"token=$PUSHOVER_API_TOKEN\" --form-string \"user=$PUSHOVER_USER_TOKEN\" --form-string \"message=New Hass config failed. Please check for errors\" https://api.pushover.net/1/messages.json\n```\n\nOur first job, `notify_success`, runs when the stage before it (`deploy`) completes successfully. This is the default for GitLab. Our `notify_fail` job, on the other hand, has `when: on_failure` set, which means it will _only_ run when the stage before it fails. We also set `allow_failure: true` on both these jobs so that the pipeline isn't marked as failed if for some reason the notification commands themselves fail. We also set the `only: - master` option since deploys only happen on the master branch.\n\nWe are using Pushover's API to send the message we want in the `script` area.\n\nWith this final stage in place, your pipeline should now look like this:\n\n![HASS pipeline overview](https://about.gitlab.com/images/blogimages/hass-cicd/pipeline-final-1.png){: .shadow.center.large}\n\n### Enjoy!\n\nThere you have it! Now you can edit your Home Assistant configuration from anywhere you'd like, using your favorite editor, by following three simple steps:\n\n1. `git clone PATH_TO_REPO` (if you have not cloned it before)\n2. Edit the configuration\n3. 
`git push origin master`\n\n[Photo](https://unsplash.com/photos/9TF54VdG0ws?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Kevin Bhagat on [Unsplash](https://unsplash.com/search/photos/smart-home?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[832,702],{"slug":7270,"featured":6,"template":678},"using-the-gitlab-ci-slash-cd-for-smart-home-configuration-management","content:en-us:blog:using-the-gitlab-ci-slash-cd-for-smart-home-configuration-management.yml","Using The Gitlab Ci Slash Cd For Smart Home Configuration Management","en-us/blog/using-the-gitlab-ci-slash-cd-for-smart-home-configuration-management.yml","en-us/blog/using-the-gitlab-ci-slash-cd-for-smart-home-configuration-management",{"_path":7276,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7277,"content":7282,"config":7287,"_id":7289,"_type":16,"title":7290,"_source":17,"_file":7291,"_stem":7292,"_extension":20},"/en-us/blog/gcp-move-update",{"title":7278,"description":7279,"ogTitle":7278,"ogDescription":7279,"noIndex":6,"ogImage":6819,"ogUrl":7280,"ogSiteName":692,"ogType":693,"canonicalUrls":7280,"schema":7281},"Update on our planned move from Azure to Google Cloud Platform","GitLab.com is migrating to Google Cloud Platform August 11 – here’s what this means for you now and in the future.","https://about.gitlab.com/blog/gcp-move-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update on our planned move from Azure to Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David Smith\"}],\n        \"datePublished\": \"2018-07-19\",\n      }",{"title":7278,"description":7279,"authors":7283,"heroImage":6819,"date":7284,"body":7285,"category":14,"tags":7286},[2463],"2018-07-19","\n\nNOTE to users in Crimea, Cuba, Iran, North Korea, Sudan, and Syria: GitLab.com may\nnot be accessible after the migration to Google. Google has informed us that\nthere are legal restrictions imposed on those countries. See this\n[U.S. Department of the Treasury link](http://www.treasury.gov/resource-center/sanctions/Programs/Pages/Programs.aspx)\nfor more details. At this time, we can only recommend that you download\nyour code or export relevant projects as a backup. See [this issue](https://gitlab.com/gitlab-com/migration/issues/649)\nfor more discussion.\n{: .alert .alert-warning}\n\nUpdate as of August 1: There will be a short maintenance window on Saturday, August 4 at 13:00 UTC. We will perform a test of approximately 1 hour. This will help us verify some of our fixes to make sure the switchover goes as planned.\n{: .alert .alert-info}\n\nUpdate as of July 27: There will be a short maintenance window on Saturday, July 28 at 13:00 UTC. We will perform a short test of approximately 5 minutes. This will help us verify some of our fixes to make sure our Chef runs work correctly with GitLab.com inaccessible.\n{: .alert .alert-info}\n\nUpdate as of July 24: Following our dry run of the migration on Saturday, July 21, we have rescheduled the migration with a new target date of Saturday, August 11. You can read through [our findings document](https://docs.google.com/document/d/1Y7Cv4BHmHw8djtDBex8opUGs8t0wWmgrueaCocKfYxs/edit?usp=sharing) for all the details.\n{: .alert .alert-info}\n\nImproving the performance and reliability of [GitLab.com](/pricing/) has been a top priority for us. 
On this front we've made some incremental gains while we've been planning for a large change with the potential to net significant results: running GitLab as a [cloud native](/topics/cloud-native/) application on Kubernetes.\n\nThe next incremental step on our cloud native journey is a big one: migrating from Azure to Google Cloud Platform (GCP). While Azure has been a great provider for us, GCP has the best Kubernetes support and we believe it will be the best provider for our long-term plans. In the short term, our users will see some immediate benefits once we cut over from Azure to GCP, including data encrypted at rest by default and faster caching due to GCP's tight integration with our existing CDN.\n\n## Upcoming maintenance windows for the GCP migration\n\nAs an update to [our earlier blog post on the migration](/blog/moving-to-gcp/), this is a short post to let our community know we are planning on performing the migration of GitLab.com the weekend of ~~July 28~~ August 11 (this has been rescheduled following our dry run on July 21). We have a maintenance window coming up that we would like to make sure everybody knows about.\n\n### What you need to know:\n\nDuring the maintenance windows, the following services will be unavailable:\n\n* SaaS website ([GitLab.com](https://gitlab.com/) will be offline, but [about.gitlab.com](https://about.gitlab.com/) and [docs.gitlab.com](https://docs.gitlab.com/) will still be available)\n* Git over SSH\n* Git over HTTPS\n* Registry\n* CI/CD\n* Pages\n\n### Maintenance window - Dry run - Saturday, July 21 at 13:00 UTC\n\nAs a further update to our testing, we are planning to take a short maintenance window this weekend on Saturday, July 21 at 13:00 UTC to do final readiness checks.\nThis maintenance window should last one hour.\n\n2018-07-23 UPDATE: Here are the [findings from the maintenance window](https://docs.google.com/document/d/1Y7Cv4BHmHw8djtDBex8opUGs8t0wWmgrueaCocKfYxs/edit). We've decided to push our target date from July 28th to August 11th to comfortably address several issues. We will likely do a small maintenance window on Saturday, July 28th, and another full practice on Saturday, August 4th.\n\n### Maintenance window - Short test - Saturday, July 28 at 13:00 UTC\n\nWe will perform a short test of approximately 5 minutes. This will help us verify some of our fixes to make sure our Chef runs work correctly with GitLab.com inaccessible.\n\n### Maintenance window - Dry run - Saturday, August 4 at 13:00 UTC\n\nWe will repeat the dry run exercise to have another chance to verify our changes to the switchover plan.\n\n### Maintenance window - Actual switchover - Saturday, ~~July 28~~ August 11 at 10:00 UTC\n\nOn the day of the migration, we are planning to start at 10:00 UTC. The time window for GitLab.com to be in maintenance is currently planned to be two hours. Should any of these times change, we will post updates on the channels listed below. 
When this window is complete, GitLab.com will be running out of GCP.\n\n* [GitLab Status page](https://status.gitlab.com/)\n* [GitLab Status Twitter](https://twitter.com/gitlabstatus)\n\n### GitLab Pages and custom domains\n\nIf you have a custom domain on [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/):\n\n* We will have a proxy in place so you do not have to change your DNS immediately.\n* GitLab Pages will ultimately be served from 35.185.44.232 after the migration.\n* Do not change your DNS to this new address until we have successfully completed the migration.\n* We will post an update to our blog about the cutoff for changing DNS from our Azure address to GCP for GitLab Pages.\n\nShould you need support during the migration, please reach out to [GitLab Support](https://about.gitlab.com/support/).\n\nWish us luck!\n",[728,873,1204,1002],{"slug":7288,"featured":6,"template":678},"gcp-move-update","content:en-us:blog:gcp-move-update.yml","Gcp Move Update","en-us/blog/gcp-move-update.yml","en-us/blog/gcp-move-update",{"_path":7294,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7295,"content":7301,"config":7306,"_id":7308,"_type":16,"title":7309,"_source":17,"_file":7310,"_stem":7311,"_extension":20},"/en-us/blog/solving-gitlabs-changelog-conflict-crisis",{"title":7296,"description":7297,"ogTitle":7296,"ogDescription":7297,"noIndex":6,"ogImage":7298,"ogUrl":7299,"ogSiteName":692,"ogType":693,"canonicalUrls":7299,"schema":7300},"How we solved GitLab's CHANGELOG conflict crisis","How we eliminated changelog-related merge conflicts and automated a crucial part of our release process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672139/Blog/Hero%20Images/solving-gitlab-changelog-crisis.jpg","https://about.gitlab.com/blog/solving-gitlabs-changelog-conflict-crisis","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we solved GitLab's CHANGELOG conflict crisis\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Speicher\"}],\n        \"datePublished\": \"2018-07-03\",\n      }",{"title":7296,"description":7297,"authors":7302,"heroImage":7298,"date":7303,"body":7304,"category":14,"tags":7305},[4574],"2018-07-03","\n\nSince its [very first commit] more than six years ago, GitLab has had a changelog\ndetailing the noteworthy changes in each release. Shortly after [Enterprise\nEdition (EE) was introduced], it [got a changelog of its own]. Whenever anyone\n– whether it was a community contributor or a GitLab employee – contributed a\nnew feature or fix to the project, a changelog entry would be added to let users\nknow what improved.\n\nAs GitLab gained in popularity and started receiving more contributions, we'd\nconstantly see merge conflicts in the changelog when multiple merge requests\nattempted to add an entry to the list. This quickly became a major source of
This quickly became a major source of\ndelays in development, as contributors would have to rebase their branch in order\nto resolve the conflicts.\n\nThis post outlines how we completely eliminated changelog-related merge\nconflicts, removed bottlenecks for contributions, and automated a crucial part\nof our release process.\n\nAt the beginning, GitLab's `CHANGELOG` file would look something like this:\n\n```text\nv 8.0.0 (unreleased)\n  - Prevent anchors from being hidden by header (Stan Hu)\n  - Remove satellites\n  - Better performance for web editor (switched from satellites to rugged)\n  - Faster merge\n  - ...\n  - Ability to fetch merge requests from refs/merge-requests/:id\n\nv 7.14.1\n  - Improve abuse reports management from admin area\n  - Ability to enable SSL verification for Webhooks\n\nv 7.14.0\n  - Fix bug where non-project members of the target project could set labels on new merge requests.\n  - Upgrade gitlab_git to 7.2.14 to ignore CRLFs in .gitmodules (Stan Hu)\n  - ...\n  - Fix broken code import and display error messages if something went wrong with creating project (Stan Hu)\n```\n\nWhen a developer made a change in the upcoming release, `8.0.0` in this example,\nthey would add a changelog entry at the bottom:\n\n```diff\ndiff --git a/CHANGELOG b/CHANGELOG\nindex de2066f..0fc2c18 100644\n--- a/CHANGELOG\n+++ b/CHANGELOG\n@@ -5,6 +5,7 @@ v 8.0.0 (unreleased)\n   - Faster merge\n   - ...\n   - Ability to fetch merge requests from refs/merge-requests/:id\n+  - Made literally everything better. Evvvvverything!\n\n v 7.14.1\n   - Improve abuse reports management from admin area\n```\n\nAt the same time, another developer might have made a similar change in _their_\nbranch:\n\n```diff\ndiff --git a/CHANGELOG b/CHANGELOG\nindex de2066f..5f81cfd 100644\n--- a/CHANGELOG\n+++ b/CHANGELOG\n@@ -5,6 +5,7 @@ v 8.0.0 (unreleased)\n   - Faster merge\n   - ...\n   - Ability to fetch merge requests from refs/merge-requests/:id\n+  - Made a few things worse. Woops!\n\n v 7.14.1\n   - Improve abuse reports management from admin area\n```\n\nNow when one branch was merged, it'd create a conflict in the other:\n\n```diff\ndiff --cc CHANGELOG\nindex 5f81cfd,0fc2c18..0000000\n--- a/CHANGELOG\n+++ b/CHANGELOG\n@@@ -5,7 -5,7 +5,11 @@@ v 8.0.0 (unreleased\n    - Faster merge\n    - ...\n    - Ability to fetch merge requests from refs/merge-requests/:id\n++\u003C\u003C\u003C\u003C\u003C\u003C\u003C HEAD\n +  - Made a few things worse. Woops!\n++=======\n+   - Made literally everything better. Evvvvverything!\n++>>>>>>> developer-1\n\n  v 7.14.1\n    - Improve abuse reports management from admin area\n```\n\nThis resulted in a ton of wasted time as something would get merged, and then\nevery other open branch adding a changelog entry would need to be rebased. The\nsituation only got worse as the number of contributors to GitLab grew over time.\n\nOur initial, [boring solution] to the problem was to begin adding empty\nplaceholder entries at the beginning of each monthly release cycle. The\nchangelog for the upcoming unreleased version might look like this:\n\n```\nv8.1.0 (unreleased)\n  -\n  -\n  -\n  -\n  -\n  -\n  -\n  - (and so on)\n```\n\nA developer would make their change and then choose a random spot in the list to\nadd a changelog entry. This worked for a while, until the placeholders began to\nbe filled out as we got closer to the release date. 
Eventually two (or more)\nmerge requests would attempt to add different entries at the same placeholder,\nand merging one created a conflict in the others.\n\nThe problem was lessened, but not solved.\n\nNot only was this a huge waste of time for developers, it created an additional\nheadache for [release managers] when they cherry-picked a commit into a stable\nbranch for a patch release. If the commit included a changelog entry, which any\nchange intended for a patch release _should_ have, cherry-picking that commit\nwould bring in the contents of the changelog at the point of that commit, often\nincluding dozens of unrelated changes. The release manager would have to\nmanually remove the unrelated entries, often doing this multiple times per\nrelease. This was compounded when we had to release multiple patch versions at\nonce due to a security issue.\n\n[very first commit]: https://gitlab.com/gitlab-org/gitlab-ce/commit/9ba1224867665844b117fa037e1465bb706b3685\n[Enterprise Edition (EE) was introduced]: /releases/2013/07/22/announcing-gitlab-enterprise-edition/\n[got a changelog of its own]: https://gitlab.com/gitlab-org/gitlab-ee/commit/e316324be5f71f02a01ae007ab1cf5cbe410c2e1\n[boring solution]: https://handbook.gitlab.com/handbook/values/#efficiency\n[release managers]: https://gitlab.com/gitlab-org/release/docs/blob/master/quickstart/release-manager.md\n\n## Brainstorming solutions\n\nFrustrations with the process finally reached a tipping point, and [an issue was\ncreated] to discuss a solution. [Yorick] had the [original idea] that would\nultimately form the foundation of our solution. During a [trip around the\nworld], [Douwe], [Marin], and I were in Brooklyn, NY, and during a walk\naround the city one beautiful summer evening we ended up [with a proposal] to\nfinally solve the problem.\n\nEach changelog entry would be its own YAML file in a `CHANGELOG/unreleased`\nfolder. When a release manager went to cherry-pick a merge into a stable branch\nin preparation for a release, they'd use a custom script that would perform the\ncherry-pick and then move any changelog entry added by that action to a\nversion-specific subfolder, such as `CHANGELOG/8.9.4`. At the time of release,\nany entries in the version's subfolder would be compiled into a single Markdown\nchangelog file, and then deleted.\n\nWith an idea of where we wanted to end up but no idea how to get there, I\nstarted with a [spike].\n\n[an issue was created]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17826\n[original idea]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17826#note_12623521\n[Yorick]: /company/team/#yorickpeterse\n[Douwe]: /company/team/#DouweM\n[Marin]: /company/team/#maxlazio\n[trip around the world]: /2016/08/24/gitlab-in-action/\n[spike]: https://gitlab.com/snippets/1713271\n\n## A turning point\n\nAfter a few days of working on the spike, I [had a realization] that we didn't\nneed the cherry-picking concept at all:\n\n> Cherry picking a merge commit into a stable branch will add that merge's\n> `CHANGELOG/unreleased/whatever-its-called.yml` file to the stable branch. Upon\n> tagging a release with release-tools, we can consider _everything_ in that\n> stable branch's \"unreleased\" folder as part of the tagged release. We collect\n> those files, compile them to Markdown, remove them from the stable branch\n> _and_ `master`, and that's our changelog for the release.\n\nThis was a major \"aha\" moment, as it greatly simplified the\nworkflow for release managers. 
They could continue their existing workflow, and\nthe release flow would transparently handle the rest. It also meant we could\nhandle everything in our [release-tools] project, which is responsible\nfor tagging a release and kicking off our packaging.\n\nEven though we ended up not using a lot of the work that went into it, my\noriginal spike was still valuable. It allowed us to see pain points early on,\nrefine the process, and find a better solution. It also gave me additional\nexperience interacting with Git repositories programmatically via [Rugged], and\nthat would go on to be especially useful as we implemented the final tooling.\n\n[with a proposal]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17826#note_12998363\n[had a realization]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17826#note_13527876\n[release-tools]: https://gitlab.com/gitlab-org/release-tools/\n[Rugged]: https://github.com/libgit2/rugged\n\n## Building the building blocks\n\nWe knew there were several components that we'd need to build:\n\n1. Something to read and represent the individual YAML data files\n1. Something to compile individual entries into a Markdown list\n1. Something to insert the compiled Markdown into the _correct spot_ in an\n   existing list of releases\n1. Something to remove the files that had been compiled, and then commit the\n   updated `CHANGELOG.md` file to the repository\n\nAll of these components were created in a [single merge request] and refined\nthrough several code review cycles. The commits listed there are all fairly\natomic and may be interesting to read through on their own. The code review that\nhappened in the merge request was incredibly valuable, and allowed us to really\nsimplify some code that was hard to wrap one's head around, even for me as the\noriginal author!\n\n## Automated testing\n\nOf course, we wouldn't consider this solution complete until we had automated\ntests guaranteeing the behavior and consistency of the automated compilation,\nincluding reading from and writing to multiple branches across multiple\nrepositories.\n\nI ended up using Rugged to create [fixture repositories] that would create a\nrepeatable testing environment, which we could then verify with [custom RSpec\nmatchers].\n\n[single merge request]: https://gitlab.com/gitlab-org/release-tools/merge_requests/29\n[fixture repositories]: https://gitlab.com/gitlab-org/release-tools/blob/6531d8d7b7acbdf6ab577db4381036bbc18e3bbc/spec/support/changelog_fixture.rb\n[custom RSpec matchers]: https://gitlab.com/gitlab-org/release-tools/blob/6531d8d7b7acbdf6ab577db4381036bbc18e3bbc/spec/support/matchers/rugged_matchers.rb\n\n## Hooking into the release process\n\nAt this point we were fairly confident the changelog compilation worked, so it\nwas time to [hook it into our existing release process].\n\nWhile testing this integration on a real release, we uncovered a pretty\nhilarious (but dangerous) oversight. I'll let the commit that fixed it speak for\nitself:\n\n> [Protect against deleting everything when there are no changelog entries](https://gitlab.com/gitlab-org/release-tools/merge_requests/47/diffs?commit_id=5b3fe48a7697bda856b6bed1fedc4c210439849b)\n>\n> On a stable branch with no changelog entry files, the resulting empty\n> array was passed to `Rugged::Index#remove_all` which, when given an\n> empty array, removes **everything**. 
This was not ideal.\n\n[hook it into our existing release process]: https://gitlab.com/gitlab-org/release-tools/merge_requests/47\n\n## Developer tooling\n\nThe final pieces of the puzzle were creating a tool to help developers create\nvalid changelog entries easily, and adding documentation. Both were handled in\n[this merge request](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/7098).\n\nThis tool allows developers to run `bin/changelog`, passing it the title of\ntheir change, to generate a valid changelog entry file. Additional options are\n[in the documentation](https://docs.gitlab.com/ee/development/changelog.html).
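To make this concrete, a generated entry file is just a small YAML document. An illustrative sketch with made-up values (the exact fields and options are described in the documentation linked above):\n\n```yaml\n---\ntitle: Fix 500 error when viewing a merge request\nmerge_request: 12345\nauthor: Jane Developer\n```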
## Future plans\n\nThis changelog process has worked beautifully for us since it was introduced,\nand we know it might be just as useful to other projects. We're [investigating a\nway to make it more generic] so that it can remove a tedious chore for more\ndevelopers.\n\nI worked on this project as part of our Edge team, now known as the [Quality\nteam]. If you're interested in this kind of internal tooling or other\nautomation, we're hiring! Check out our [open positions](/jobs/).\n\n[investigating a way to make it more generic]: https://gitlab.com/gitlab-org/release-tools/issues/209\n[Quality team]: https://about.gitlab.com/handbook/engineering/quality/\n\nPhoto by [Patrick Tomasso](https://unsplash.com/photos/1S-PanVaJmU?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/abstract?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[915,727],{"slug":7307,"featured":6,"template":678},"solving-gitlabs-changelog-conflict-crisis","content:en-us:blog:solving-gitlabs-changelog-conflict-crisis.yml","Solving Gitlabs Changelog Conflict Crisis","en-us/blog/solving-gitlabs-changelog-conflict-crisis.yml","en-us/blog/solving-gitlabs-changelog-conflict-crisis",{"_path":7313,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7314,"content":7320,"config":7325,"_id":7327,"_type":16,"title":7328,"_source":17,"_file":7329,"_stem":7330,"_extension":20},"/en-us/blog/introducing-auto-breakfast-from-gitlab",{"title":7315,"description":7316,"ogTitle":7315,"ogDescription":7316,"noIndex":6,"ogImage":7317,"ogUrl":7318,"ogSiteName":692,"ogType":693,"canonicalUrls":7318,"schema":7319},"Introducing Auto Breakfast from GitLab (sort of)","GitLab can't make you breakfast? This is what happens when you tell a GitLab team member whose favorite catchphrase is \"Challenge accepted.\"","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680054/Blog/Hero%20Images/auto-breakfast.jpg","https://about.gitlab.com/blog/introducing-auto-breakfast-from-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing Auto Breakfast from GitLab (sort of)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2018-06-29\",\n      }",{"title":7315,"description":7316,"authors":7321,"heroImage":7317,"date":7322,"body":7323,"category":14,"tags":7324},[2558],"2018-06-29","\n\nA big part of [GitLab's culture](/company/culture/) is [saying thanks](/handbook/communication/#say-thanks) to one another for doing a great job. That can be anything from helping with a tough technical problem to simply sharing a nice [coffee chat](/company/culture/all-remote/#coffee-chats) to break up the work day. One day a Sales team member thanked someone from Customer Success for a great demo of [GitLab CI/CD](/solutions/continuous-integration/). The customer commented afterwards, \"Okay, what doesn't GitLab do?\"\n\nPlenty of heart-themed emoji reactions followed. We've seen users do some pretty amazing things with GitLab CI/CD, from [ramping up to weekly mobile releases](/blog/continuous-integration-ticketmaster/) to [automating boring Git operations](/blog/automating-boring-git-operations-gitlab-ci/), to [saving 90 percent on EC2 costs](/blog/autoscale-ci-runners/). However, there was one thing we hadn't seen. So in addition to this love, the question also garnered a semi-sarcastic answer:\n\n> It won't make breakfast for you, unfortunately.\n\nNever one to let a Slack conversation go unnoticed, I replied with one of my favorite phrases:\n\n![Challenge Accepted](https://about.gitlab.com/images/blogimages/breakfast-challenge.png){: .shadow.center.medium}\n\nI have to admit that my status being [`:coffee_parrot:`](https://github.com/jmhobbs/cultofthepartyparrot.com/issues/55) could have been related to my enthusiastic reply...\n\n## The challenge\n\nAt the time I had only a vague idea of how I would accomplish this. Many suggestions about Internet of Things devices followed my comment. And while a toaster with a version of Linux that will never be patched was intriguing, I wanted to do something bigger.\n\nA few years ago some friends got together and bought me an [Anova Sous Vide](https://anovaculinary.com/), knowing that I loved to cook. What they failed to calculate was that having four kids in eight years was counterproductive to learning the time-tested [French cooking method of sous-vide](https://en.wikipedia.org/wiki/Sous-vide). As such, the tool has not had a whole lot of use in its time.\n\nHowever, at this point I thought of two things:\n\n1. I love the new sous-vide egg bite offering from a well-known coffee shop\n1. The Anova Sous Vide uses [Bluetooth Low Energy (BLE)](https://en.wikipedia.org/wiki/Bluetooth_Low_Energy) to allow you to control it through an app\n\n## The recipe (culinary)\n\nWhile I did like the egg bites from a coffee shop that shall remain nameless, I don't have them all the time. I would give them a 5-_star_ rating, but they cost a few more _bucks_ than I’d like to spend 😉 So I found a [sous-vide egg bite recipe](https://recipes.anovaculinary.com/recipe/sous-vide-egg-bites-bacon-gruyere) on Anova's website.\n\n## The recipe (technology)\n\nOnce I had the recipe, all I needed was to reverse engineer the BLE connection, figure out how to get that to work from the command line, set up a project and get it integrated with GitLab CI/CD... no big deal. Luckily I found a fantastic project called PyCirculate that had already worked out a lot of the BLE connection issues with the Anova. It made me wonder if someone else had automated breakfast before... but I've yet to find them!\n\n![Ingredients...Pinterest picture](https://about.gitlab.com/images/blogimages/breakfast-pintrest.png){: .shadow.center.medium}\n\nNow that I had both recipes and all the ingredients, it was time to _*git*_ crackin'... (I can't tell you how happy I was when I thought of that joke. Did I mention I'm a dad?)\n\n### Setting up the breakfast pipeline\n\nOnce I had that project installed and working on my laptop, I uploaded the code to GitLab in the public repository in the [auto-breakfast group](https://gitlab.com/auto-breakfast/eggs/). 
Next, I installed [GitLab Runner](https://docs.gitlab.com/runner/) on a [Raspberry Pi](https://www.raspberrypi.org/). I registered the Pi as a [specific runner](https://docs.gitlab.com/runner/register/) for my project. I used a [runner tag](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#use-tags-to-control-which-jobs-a-runner-can-run) so that I could ensure the cooking job only ran on a device with a Bluetooth connection.\n\n![Specific runner](https://about.gitlab.com/images/blogimages/breakfast-runner.png){: .shadow.small.right.wrap-text}\n\nWhen I run a pipeline on `auto-breakfast/eggs`, it executes on the Raspberry Pi and thus can create the BLE connection to the Anova. With the click of a button in GitLab, my breakfast pipeline was running. All I had to do was sit back, relax, and let GitLab CI/CD do all the work.\n\n![Auto Breakfast pipeline](https://about.gitlab.com/images/blogimages/breakfast-1.JPG){: .shadow.center.medium}\n\n## The results\n\nThe egg bites were great! I even modified the recipe with some excellent Kerrygold Irish whiskey cheddar cheese. I would say it took a little more effort than usual to get things set up; however, now that it's done, I have a repeatable, single-button way to cook the recipe again (minus the egg cracking and food processing). Just like CI/CD with a `.gitlab-ci.yml` can help make software build and deployment more reliable and repeatable, it can also make a fantastic breakfast 😎\n\nNot pictured: A very messy kitchen and a very perplexed wife.\n{: .alert .alert-gitlab-purple}\n\n[Photo](https://unsplash.com/photos/I-ykyShydj0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Leti Kugler on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[832,4440],{"slug":7326,"featured":6,"template":678},"introducing-auto-breakfast-from-gitlab","content:en-us:blog:introducing-auto-breakfast-from-gitlab.yml","Introducing Auto Breakfast From Gitlab","en-us/blog/introducing-auto-breakfast-from-gitlab.yml","en-us/blog/introducing-auto-breakfast-from-gitlab",{"_path":7332,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7333,"content":7338,"config":7345,"_id":7347,"_type":16,"title":7348,"_source":17,"_file":7349,"_stem":7350,"_extension":20},"/en-us/blog/play-reviewer-roulette",{"title":7334,"description":7335,"ogTitle":7334,"ogDescription":7335,"noIndex":6,"ogImage":5846,"ogUrl":7336,"ogSiteName":692,"ogType":693,"canonicalUrls":7336,"schema":7337},"Reviewer roulette: Easy way to find merge request reviewers","Finding the right reviewer for a merge request can be tough. Reviewer Roulette makes the decision easier – by making it random!","https://about.gitlab.com/blog/play-reviewer-roulette","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Let's play Reviewer Roulette! An easy way to find a reviewer for your merge request\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dennis Tang\"}],\n        \"datePublished\": \"2018-06-28\",\n      }",{"title":7339,"description":7335,"authors":7340,"heroImage":5846,"date":7342,"body":7343,"category":14,"tags":7344},"Let's play Reviewer Roulette! An easy way to find a reviewer for your merge request",[7341],"Dennis Tang","2018-06-28","\n\nGitLab is [growing quickly], and [constantly looking for more talented people] to join the team. 
While exciting, it can be tough to keep track of who's who, especially when you're new to the company.\n\nSo how do you know who to contact if you need a pair of eyes on your merge request?\n\n## Meet Reviewer Roulette!\n\nReviewer Roulette is a Slack slash command to help GitLab team-members randomly select a person from a given team, which can be especially useful as multiple teams work together to deliver features in a single merge request.\n\n![Demo of /reviewerroulette](https://about.gitlab.com/images/blogimages/play-reviewer-roulette/demo.gif){: .shadow.center.medium}\n\n---\n\n## The idea\n\nIt's quite common to find that your issue or merge request has multiple labels associating the different feature areas and teams contributing to it. As someone who's recently joined GitLab, I'm still getting to know [all the different teams and people] that work at GitLab. Say I'm working on a feature with the [CI/CD](/topics/ci-cd/) or Discussion team: who should I reach out to if I have questions or need a review of my work?\n\n![Various labels on Merge Requests in gitlab-ce](https://about.gitlab.com/images/blogimages/play-reviewer-roulette/labels.png){: .shadow.center.medium}\n\nThe idea arose from the [frontend team weekly call] where [Tim Zallmann] reminded us that, \"Everyone on the frontend team is a reviewer.\" The team previously had a microservice built by [Luke Bennett] for this; however, it's no longer online. Beyond that, wouldn't it be convenient to simply type a command in Slack and be suggested someone to ping for a review?\n\nI can say with confidence that GitLab is a company that truly exemplifies its values, and I was empowered by the value of [collaboration] to build something that could help our team (and others!) find reviewers. I couldn't be the only one who had this problem!\n\n> **Do it yourself** Our collaboration value is about helping each other when we have questions, need critique, or need help. No need to brainstorm, wait for consensus, or do with two what you can do yourself.\n\nI quickly went to work to (hastily) put together a proof-of-concept to see if it would be something that people would want to use.\n\n## Decision fatigue, be gone!\n\n![Screenshot of /reviewerroulette](https://about.gitlab.com/images/blogimages/play-reviewer-roulette/screenshot.png){: .shadow.right.small.wrap-text}\n\nIt was presented to the frontend team and warmly received, and many people were keen to contribute and also [suggest ideas] that would make it even more useful!\n\nAlthough it was originally intended for the frontend group, since I was building it from scratch it was an easy decision to make it work for all engineering teams.\n\nWith Reviewer Roulette, I don't have to ping entire Slack channels or guess from our team page to try to find _someone_ to talk to.\n\nAdditionally, it provides a number of other benefits:\n\n1.  It promotes a more balanced distribution of reviewers amongst the team.\n    * Less experienced reviewers have more opportunities to do code reviews\n    * More experienced reviewers are not as heavily relied on\n1.  It allows more team members to learn more about parts of the codebase they may not be as familiar with, increasing the knowledge of the team overall\n1.  It provides more opportunities to apply our [code review guidelines] or [frontend style guides] to all team members\n1.  
It reduces bias towards reviewers that you may unconsciously prefer to select\n\nOf course, we have our various subject matter experts such as our [frontend domain experts] and [gitlab-ce maintainers] who may provide the best insight for a given topic, but it's good to randomly select reviewers by default!\n\n## How it's made\n\nWhen it came to building Reviewer Roulette, it wasn't so much about the tech as about being empowered to create something that would benefit the team.\n\nEmbracing our value of [efficiency], the solution is very much a boring one. It's a simple Node.js application utilizing `js-yaml` and `express` to search our [team structure file] and respond properly to Slack's slash command requests.\n\n## What's next\n\nReviewer Roulette is seeing regular usage, and has [plenty of features planned] to hopefully increase its usefulness.\n\nWhile originally intended for engineering, it can [help the entire company] out. In addition to our [Coffee Break calls], we also have [a step in our onboarding process] to meet five different people across different teams and countries. That's something that Reviewer Roulette could easily help with!\n\nWe also plan on moving it to the frontend [GKE] cluster, and activating [Auto DevOps] to make builds and deployments painless.\n\nIf you're interested in checking it out, feel free to take a look at the [project]! Perhaps it might be useful to you and your team?\n\n## Share your thoughts!\n\nIf there's interest in using Reviewer Roulette for your community contribution to GitLab projects, let us know in the comments and we can release it on Slack for everyone to use!\n\nWhat do you think of Reviewer Roulette? Is this something you would use for your team? How do you pick people for reviewing?\n\n[Photo](https://unsplash.com/photos/w6OniVDCfn0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Krissia Cruz on [Unsplash](https://unsplash.com/search/photos/roulette?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n[growing quickly]: /company/okrs/#ceo-great-team-active-recruiting-for-all-vacancies-number-of-diverse-per-vacancy-real-time-dashboard\n[constantly looking for more talented people]: /jobs/\n[all the different teams and people]: /company/team/\n[frontend domain experts]: /handbook/engineering/frontend/#frontend-domain-experts\n[gitlab-ce maintainers]: /handbook/engineering/projects/#gitlab-ce\n[frontend team weekly call]: /handbook/engineering/frontend/#frontend-group-calls\n[Tim Zallmann]: /company/team/#tpmtim\n[Luke Bennett]: /company/team/#__lukebennett\n[suggest ideas]: https://gitlab.com/dennis/reviewer-roulette/issues/\n[plenty of features planned]: https://gitlab.com/dennis/reviewer-roulette/issues/\n[efficiency]: https://handbook.gitlab.com/handbook/values/#efficiency\n[team structure file]: https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/team.yml\n[auto devops]: https://docs.gitlab.com/ee/topics/autodevops/\n[coffee break calls]: /company/culture/all-remote/tips/#coffee-chats\n[a step in our onboarding process]: https://gitlab.com/gitlab-com/people-group/employment-templates/-/blob/main/.gitlab/issue_templates/onboarding.md#day-4-morning-social\n[help the entire company]: https://gitlab.com/dennis/reviewer-roulette/issues/12\n[gke]: /partners/technology-partners/google-cloud-platform/\n[project]: https://gitlab.com/dennis/reviewer-roulette/\n[collaboration]: https://handbook.gitlab.com/handbook/values/#collaboration\n[code 
review guidelines]: https://docs.gitlab.com/ee/development/code_review.html\n[Frontend style guides]: https://docs.gitlab.com/ee/development/fe_guide/index.html#style-guides\n",[915,1347,1979],{"slug":7346,"featured":6,"template":678},"play-reviewer-roulette","content:en-us:blog:play-reviewer-roulette.yml","Play Reviewer Roulette","en-us/blog/play-reviewer-roulette.yml","en-us/blog/play-reviewer-roulette",{"_path":7352,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7353,"content":7358,"config":7363,"_id":7365,"_type":16,"title":7366,"_source":17,"_file":7367,"_stem":7368,"_extension":20},"/en-us/blog/moving-to-gcp",{"title":7354,"description":7355,"ogTitle":7354,"ogDescription":7355,"noIndex":6,"ogImage":6819,"ogUrl":7356,"ogSiteName":692,"ogType":693,"canonicalUrls":7356,"schema":7357},"We’re moving from Azure to Google Cloud Platform","GitLab.com is migrating to Google Cloud Platform – here’s what this means for you now and in the future.","https://about.gitlab.com/blog/moving-to-gcp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We’re moving from Azure to Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"}],\n        \"datePublished\": \"2018-06-25\",\n      }",{"title":7354,"description":7355,"authors":7359,"heroImage":6819,"date":7360,"body":7361,"category":14,"tags":7362},[1462],"2018-06-25","\nUpdate Jul 19, 2018: The latest info can be found in the [GCP migration update](/blog/gcp-move-update/) blog post. \n{: .alert .alert-info}\n\nImproving the performance and reliability of [GitLab.com](/pricing/)  has been a top priority for us. On this front we've made some incremental gains while we've been planning for a large change with the potential to net significant results: moving from Azure to Google Cloud Platform (GCP).\n\nWe believe [Kubernetes](/solutions/kubernetes/) is the future. It's a technology that makes reliability at massive scale possible. This is why earlier this year we shipped native [integration with Google Kubernetes Engine](/blog/gke-gitlab-integration/) (GKE) to give GitLab users a simple way to use Kubernetes. Similarly, we've chosen GCP as our cloud provider because of our desire to run GitLab on Kubernetes. Google invented Kubernetes, and GKE has the most robust and mature Kubernetes support. Migrating to GCP is the next step in our plan to make GitLab.com ready for your mission-critical workloads.\n\nOnce the migration has taken place, we’ll continue to focus on bumping up the stability and scalability of GitLab.com, by moving our worker fleet across to Kubernetes using GKE. This move will leverage our [Cloud Native charts](https://gitlab.com/charts/gitlab), which with [GitLab 11.0](/releases/2018/06/22/gitlab-11-0-released/#cloud-native-gitlab-helm-chart-now-beta) are now in beta.\n\n## How we’re preparing for the migration\n\n### Geo\n\nOne GitLab feature we are utilizing for the GCP migration is our [Geo product](https://docs.gitlab.com/ee/administration/geo/).\nGeo allows for full, read-only mirrors of GitLab instances. 
Besides browsing the GitLab UI, Geo instances can be used for cloning and fetching projects, allowing geographically distributed teams to collaborate more efficiently.\n\nNot only does that allow for disaster recovery in case of an unplanned outage, but Geo can also be used for a planned failover to migrate GitLab instances.\n\n![GitLab Geo - Migration](https://about.gitlab.com/images/gitlab_ee/gitlab_geo_diagram_migrate.png){: .medium.center}\n\nFollowing our mantra of dogfooding every part of our product, we are using Geo to move GitLab.com from Microsoft Azure to Google Cloud Platform. Geo works well and scales: it has been used reliably by many customers since going GA. We believe Geo will perform well during the migration, and we plan for this event to serve as another proof point of its value.\n\nRead more about Disaster Recovery with Geo in our [Documentation](https://docs.gitlab.com/ee/administration/geo/disaster_recovery/).\n\n#### The Geo transfer\n\nFor the past few months, we have maintained a Geo secondary site of GitLab.com, called `gprd.gitlab.com`, running on Google Cloud Platform. This secondary keeps an up-to-date synchronized copy of about 200TB of Git data and 2TB of relational data in PostgreSQL. Originally we also replicated Git LFS, File Uploads and other files, but these have since been migrated to Google Cloud Storage object storage in a parallel effort.\n\nFor logistical reasons, we selected GCP's `us-east1` site in the US state of South Carolina. Our current Azure datacenter is in US East 2, located in Virginia. This is a round-trip distance of 800km, or 3 light-milliseconds. In reality, this translates into a 30ms ping time between the two sites.\n\nBecause of the huge amount of data we need to synchronize between Azure and GCP, we were initially concerned about this additional latency and the risk it might pose to our Geo transfer. However, after our initial testing, we realized that network latency and bandwidth were not bottlenecks in the transfer.\n\n### Object storage\n\nIn parallel to the Geo transfer, we are also migrating all file artifacts, including CI Artifacts, Traces (CI log files), file attachments, LFS objects and other file uploads, to [Google Cloud Storage](https://cloud.google.com/storage/) (GCS), Google's managed object storage implementation. This has involved moving about 200TB of data off our Azure-based file servers into GCS.\n\nUntil recently, GitLab.com stored these files on NFS servers, with NFS volumes mounted onto each web and API worker in the fleet. NFS is a single point of failure and can be difficult to scale. Switching to GCS allows us to leverage its built-in redundancy and multi-region capabilities. This in turn will help to improve our own availability and remove single points of failure from our stack. The object storage effort is part of our longer-term strategy of lifting GitLab.com infrastructure off NFS. The [Gitaly project](https://gitlab.com/gitlab-org/gitaly), a Git RPC service for GitLab, is part of the same initiative. 
This effort to migrate GitLab.com off NFS is also a prerequisite for our plans to move GitLab.com over to Kubernetes.\n\n### How we're working to ensure a smooth failover\n\nOnce or twice a week, several teams, including [Geo](/handbook/engineering/development/enablement/systems/geo/), [Production](https://about.gitlab.com/handbook/engineering/infrastructure/production/), and [Quality](https://about.gitlab.com/handbook/engineering/quality/), get together on a video call to conduct a rehearsal of the failover in our staging environment.\n\nLike the production event, the rehearsal takes place from Azure across to GCP. We timebox this event, and carefully monitor how long each phase takes, looking to cut time off wherever possible. The failover currently takes two hours, including quality assurance of the failover environment.\n\nThis involves four steps:\n\n- A [preflight checklist](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/preflight_checks.md),\n- The main [failover procedure](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/failover.md),\n- The [test plan](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/test_plan.md) to verify that everything is working, and\n- The [failback procedure](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/failback.md), used to undo the changes so that the staging environment is ready for the next failover rehearsal.\n\nSince these documents are stored as issue templates on GitLab, we can use them to create issues for each successive failover attempt.\n\nAs we run through each rehearsal, new bugs, edge cases and issues are discovered. We track these issues in the [GitLab Migration tracker](https://gitlab.com/gitlab-com/migration/issues). Any changes to the failover procedure are then made as [merge requests into the issue templates](https://gitlab.com/gitlab-com/migration/merge_requests?scope=all&state=all).\n\nThis process allows us to iterate rapidly on the failover procedure, improving the failover documentation and helping the team build confidence in the procedure.\n\n## When will the migration take place?\n\nOur absolute [top priority](https://gitlab.com/gitlab-com/migration#failover-priorities) for the failover is to ensure that we protect the integrity of our users' data. We will only conduct the failover once we are completely satisfied that all serious issues have been ironed out, that there is no risk of data loss, and that our new environment on Google Cloud Platform is ready for production workloads.\n\nThe failover is currently scheduled for Saturday, July 28, 2018. 
We will follow this post up shortly with further information on the event and will provide plenty of advance notice.\n\nRead the most recent update on [GitLab's journey from Azure to GCP](/blog/gitlab-journey-from-azure-to-gcp/) here!\n",[728,873,1204,1002],{"slug":7364,"featured":6,"template":678},"moving-to-gcp","content:en-us:blog:moving-to-gcp.yml","Moving To Gcp","en-us/blog/moving-to-gcp.yml","en-us/blog/moving-to-gcp",{"_path":7370,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7371,"content":7377,"config":7383,"_id":7385,"_type":16,"title":7386,"_source":17,"_file":7387,"_stem":7388,"_extension":20},"/en-us/blog/autoscale-continuous-deployment-gitlab-runner-digital-ocean",{"title":7372,"description":7373,"ogTitle":7372,"ogDescription":7373,"noIndex":6,"ogImage":7374,"ogUrl":7375,"ogSiteName":692,"ogType":693,"canonicalUrls":7375,"schema":7376},"How to autoscale continuous deployment with GitLab Runner on DigitalOcean","Our friends over at DigitalOcean share how to configure a highly scalable, responsive and cost-effective GitLab infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680042/Blog/Hero%20Images/gitlab-digitalocean-cover.jpg","https://about.gitlab.com/blog/autoscale-continuous-deployment-gitlab-runner-digital-ocean","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to autoscale continuous deployment with GitLab Runner on DigitalOcean\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Owen Williams\"}],\n        \"datePublished\": \"2018-06-19\",\n      }",{"title":7372,"description":7373,"authors":7378,"heroImage":7374,"date":7380,"body":7381,"category":14,"tags":7382},[7379],"Owen Williams","2018-06-19","\n\n[GitLab CI/CD](/solutions/continuous-integration/) is an effective way to build the habit of testing all code before it’s deployed. GitLab CI/CD is also highly scalable thanks to an additional tool, GitLab Runner, which automates scaling your build queue in order to avoid long wait times for development teams trying to release code.\n\nIn this guide, we will demonstrate how to configure a highly scalable GitLab infrastructure that manages its own costs, and automatically responds to load by increasing and decreasing available server capacity.\n\n## Goals\n\nWe’re going to build a scalable CI/CD process on DigitalOcean that automatically responds to demand by creating new servers on the platform and destroying them when the queue is empty.\n\nThese reusable servers are spawned by the GitLab Runner process and are automatically deleted when no jobs are running, reducing costs and administration overhead for your team.\n\nAs we’ll explain in this tutorial, you are in control of how many machines are created at any given time, as well as the length of time they’re retained before being destroyed.\n\nWe’ll be using three separate servers to build this project, so let’s go over terminology first:\n\n* **GitLab**: Your hosted GitLab instance or self-managed instance where your code repositories are stored.\n\n* **GitLab Bastion**: The *bastion* server or Droplet is the core of what we’ll be configuring. It is the control instance that is used to interact with the DigitalOcean API to create Droplets and destroy them when necessary. No jobs are executed on this server.\n\n* **GitLab Runner**: Your *runners* are transient servers or Droplets that are created on the fly by the *bastion* server when needed to execute a CI/CD job in your build queue. 
These servers are disposable, and are where your code is executed or tested before your build is marked as passing or failing.\n\n![GitLab Runners Diagram](https://assets.digitalocean.com/articles/gitlab-runner/Autoscaling-GitLab-Runners.png){: .medium.center}\n\nBy leveraging each of the GitLab components, the CI/CD process will enable you to scale responsively based on demand. With these goals in mind, we are ready to begin setting up our [continuous deployment](/topics/ci-cd/) with GitLab and DigitalOcean.\n\n## Prerequisites\n\nThis tutorial will assume you have already configured GitLab on your own server or through the hosted service, and that you have an existing DigitalOcean account.\n\nTo set this up on an Ubuntu 16.04 Droplet, you can use the DigitalOcean one-click image, or follow our guide: “[How To Install and Configure GitLab on Ubuntu 16.04](https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-gitlab-on-ubuntu-16-04).”\n\nFor the purposes of this tutorial, we assume you have private networking enabled on this Droplet, which you can achieve by following our guide on “[How To Enable DigitalOcean Private Networking on Existing Droplets](https://www.digitalocean.com/community/tutorials/how-to-enable-digitalocean-private-networking-on-existing-droplets),” but it is not compulsory.\n\nThroughout this tutorial, we’ll be using non-root users with admin privileges on our Droplets.\n\n## Step 1: Import JavaScript project\n\nTo begin, we will create a new example project in your existing GitLab instance containing a sample Node.js application.\n\n![GitLab interface](https://assets.digitalocean.com/articles/gitlab-runner/gitlab.jpg){: .shadow.large.center}\n\nLog into your GitLab instance and click the **plus icon**, then select **New project** from the dropdown menu.\n\nOn the new project screen, select the **Import project** tab, then click **Repo by URL** to import our example project directly from GitHub.\n\nPaste the below clone URL into the Git repository URL field:\n\n```bash\nhttps://github.com/do-community/hello_hapi.git\n```\n\nThis repository is a basic JavaScript application for the purposes of demonstration, which we won’t be running in production. To complete the import, click the **New Project** button.\n\nYour new project will now be in GitLab and we can get started setting up our CI pipeline.\n\n## Step 2: Set up infrastructure\n\nOur GitLab Runner setup requires specific configuration, as we’re planning to programmatically create Droplets to handle CI load as it grows and shrinks.\n\nWe will create two types of machines in this tutorial: a **bastion** instance, which controls and spawns new machines, and our **runner** instances, which are temporary servers spawned by the bastion Droplet to build code when required. The bastion instance uses Docker to create your runners.\n\nHere are the DigitalOcean products we’ll use, and what each component is used for:\n\n* **Flexible Droplets** — We will create memory-optimized Droplets for our GitLab Runners as it’s a memory-intensive process which will run using Docker for containerization. You can shrink or grow this Droplet in the future as needed; however, we recommend the flexible Droplet option as a starting point to understand how your pipeline will perform under load.\n\n* **DigitalOcean Spaces (Object Storage)** — We will use [DigitalOcean Spaces](https://www.digitalocean.com/products/spaces/) to persist cached build components across your runners as they’re created and destroyed. 
This reduces the time required to set up a new runner when the CI pipeline is busy, and allows new runners to pick up where others left off immediately.\n\n* **Private Networking** — We will create a private network for your bastion Droplet and GitLab runners to ensure secure code compilation and to reduce firewall configuration required.\n\nTo start, we’ll create the bastion Droplet. Create a [new Droplet](https://cloud.digitalocean.com/droplets/new), then under **choose an image**, select the **One-click apps** tab. From there, select **Docker 17.12.0-ce on 16.04** (note that this version is current at the time of writing), then choose the smallest Droplet size available, as our bastion Droplet will manage the creation of other Droplets rather than actually perform tests.\n\nIt is recommended that you create your server in a data center that includes [DigitalOcean Spaces](https://www.digitalocean.com/community/tutorials/an-introduction-to-digitalocean-spaces) in order to use the object storage caching features mentioned earlier.\n\nSelect both the **Private networking** and **Monitoring** options, then click **Create Droplet**.\n\nWe also need to set up our storage space which will be used for caching. Follow the steps in “[How To Create a DigitalOcean Space and API Key](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key)” to create a new Space in the same or nearest data center as your hosted GitLab instance, along with an API Key.\n\nNote this key down, as we’ll need it later in the tutorial.\n\nNow it’s time to get our CI started!\n\n## Step 3: Configure the GitLab Runner Bastion Server\n\nWith the fresh Droplet ready, we can now configure GitLab Runner. We’ll be installing scripts from GitLab and GitHub repositories.\n\nAs a best practice, be sure to inspect scripts to confirm what you will be installing prior to running the full commands below.\n\nConnect to the Droplet using SSH, move into the `/tmp` directory, then add the [official GitLab Runner repository](https://docs.gitlab.com/runner/install/linux-repository.html) to Ubuntu’s package manager:\n\n```bash\ncd /tmp\ncurl -L https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh | sudo bash\n```\n\nOnce added, install the GitLab Runner application:\n\n```bash\nsudo apt-get install gitlab-runner\n```\n\nWe also need to install **[Docker Machine](https://docs.docker.com/machine/install-machine/#install-machine-directly)**, which is an additional Docker tool that assists with automating the deployment of containers on cloud providers:\n\n```bash\ncurl -L https://github.com/docker/machine/releases/download/v0.14.0/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine && \\\nsudo install /tmp/docker-machine /usr/local/bin/docker-machine\n```\n\nWith these installations complete, we can move on to connecting our GitLab Runner to our GitLab install.\n\n## Step 4: Obtain Runner registration token\n\nTo connect GitLab Runner to your existing GitLab install, we need to link the two instances together by obtaining a token that authenticates your runner to your code repositories.\n\nLog in to your existing GitLab instance as the admin user, then click the wrench icon to enter the admin settings area.\n\nOn the left of your screen, hover over **Overview** and select **Runners** from the list that appears.\n\nOn the Runners page under the **How to set up a shared Runner for a new project** section, copy the token shown in Step 3, and make a note of it along with the publicly accessible URL of your GitLab instance from Step 2. If you are using HTTPS for GitLab, make sure it is not a self-signed certificate, or GitLab Runner will fail to start.\n\n## Step 5: Configure GitLab on the Bastion Droplet\n\nBack in your SSH connection with your bastion Droplet, run the following command:\n\n```bash\nsudo gitlab-runner register\n```\n\nThis will initiate the linking process, and you will be asked a series of questions.\n\nAt the first prompt, enter the **GitLab instance URL** from the previous step:\n\n```bash\nPlease enter the gitlab-ci coordinator URL (e.g. https://gitlab.com)\nhttps://example.digitalocean.com\n```\n\nEnter the token you obtained from your GitLab instance:\n\n```bash\nPlease enter the gitlab-ci token for this runner\nsample-gitlab-ci-token\n```\n\nEnter a description that will help you recognize it in the GitLab web interface. We recommend naming this instance something unique, like `runner-bastion` for clarity.\n\n```bash\nPlease enter the gitlab-ci description for this runner\n[yourhostname] runner-bastion\n```\n\nIf relevant, you may enter the tags for code you will build with your runner. However, we recommend leaving this blank at this stage. This can easily be changed from the GitLab interface later.\n\n```bash\nPlease enter the gitlab-ci tags for this runner (comma separated):\ncode-tag\n```\n\nChoose whether or not your runner should be able to run untagged jobs. This setting determines whether your runner should build repositories with no tags at all, or require specific tags. Select true in this case, so your runner can execute jobs from all repositories.\n\n```bash\nWhether to run untagged jobs [true/false]: true\n```\n\nChoose whether this runner should be shared among your projects or locked to the current one, which blocks it from building any code other than the projects specified. Select false for now, as this can be changed later in GitLab’s interface:\n\n```bash\nWhether to lock Runner to current project [true/false]: false\n```\n\nChoose the executor that will build your machines. Because we’ll be creating new Droplets using Docker, we’ll choose `docker+machine` here, but you can read more about the advantages of each approach in this [compatibility chart](https://docs.gitlab.com/runner/executors/README.html#compatibility-chart):\n\n```bash\nPlease enter the executor: ssh, docker+machine, docker-ssh+machine, kubernetes, docker, parallels, virtualbox, docker-ssh, shell:\ndocker+machine\n```\n\nYou’ll be asked which image to use for projects that don’t explicitly define one. We’ll choose a basic, secure default:\n\n```bash\nPlease enter the Docker image (e.g. ruby:2.1):\nalpine:latest\n```\n\nNow you’re done configuring the core bastion runner! 
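\n\nAs an optional sanity check, you can confirm the registration from the bastion’s shell before returning to the web interface; `gitlab-runner list` prints the runners saved in the local configuration file, and `gitlab-runner verify` checks that they can still authenticate with your GitLab instance:\n\n```bash\n# Show the runners registered in /etc/gitlab-runner/config.toml\nsudo gitlab-runner list\n\n# Confirm the registered runners can still authenticate with your GitLab instance\nsudo gitlab-runner verify\n```\n\n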
At this point it should appear within the GitLab Runner page of your GitLab admin settings, which we accessed to obtain the token.\n\nIf you encounter any issues with these steps, the [GitLab Runner documentation](https://docs.gitlab.com/runner/register/index.html) includes options for troubleshooting.\n\n## Step 6: Configure Docker caching and Docker Machine\n\nTo speed up Droplet creation when the build queue is busy, we’ll leverage Docker’s caching tools on the Bastion Droplet to store the images for your commonly used containers on DigitalOcean Spaces.\n\nTo do so, upgrade Docker Machine on your SSH shell using the following command:\n\n```bash\ncurl -L https://github.com/docker/machine/releases/download/v0.14.0/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine && sudo install /tmp/docker-machine /usr/local/bin/docker-machine\n```\n\nWith Docker Machine upgraded, we can move on to setting up our access tokens for GitLab Runner to use.\n\n## Step 7: Gather DigitalOcean credentials\n\nNow we need to create the credentials that GitLab Runner will use to create new Droplets using your DigitalOcean account.\n\nVisit your DigitalOcean [dashboard](https://cloud.digitalocean.com) and click **API**. On the next screen, look for **Personal access tokens** and click **Generate New Token**.\n\nGive the new token a name you will recognize such as `GitLab Runner Access` and ensure that both the read and write scopes are enabled, as we need the Droplet to create new machines without human intervention.\n\nCopy the token somewhere safe as we’ll use it in the next step. You can’t retrieve this token again without regenerating it, so be sure it’s stored securely.\n\n## Step 8: Edit GitLab Runner configuration files\n\nTo bring all of these components together, we need to finish configuring our bastion Droplet to communicate with your DigitalOcean account.\n\nIn your SSH connection to your bastion Droplet, use your favorite text editor, such as nano, to open the GitLab Runner configuration file for editing (root privileges are needed to edit it):\n\n```bash\nsudo nano /etc/gitlab-runner/config.toml\n```\n\nThis configuration file is responsible for the rules your CI setup uses to scale up and down on demand. 
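\n\nBefore adding anything, it can be worth keeping a copy of the stock file, so you have something to diff your edits against later (this step is optional):\n\n```bash\n# Keep a pristine copy of the default configuration for later comparison\nsudo cp /etc/gitlab-runner/config.toml /etc/gitlab-runner/config.toml.orig\n```\n\n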
To configure the bastion to autoscale on demand, you need to add the following lines:\n\n```toml\nconcurrent = 50   # All registered Runners can run up to 50 concurrent builds\n\n[[runners]]\n  url = \"https://example.digitalocean.com\"\n  token = \"existinggitlabtoken\"             # Note this is different from the registration token used by `gitlab-runner register`\n  name = \"example-runner\"\n  executor = \"docker+machine\"        # This Runner is using the 'docker+machine' executor\n  limit = 10                         # This Runner can execute up to 10 builds (created machines)\n  [runners.docker]\n    image = \"alpine:latest\"               # Our secure image\n  [runners.machine]\n    IdleCount = 1                    # The amount of idle machines we require for CI if build queue is empty\n    IdleTime = 600                   # Each machine can be idle for up to 600 seconds, then destroyed\n    MachineName = \"gitlab-runner-autoscale-%s\"    # Each machine will have a unique name ('%s' is required and generates a random number)\n    MachineDriver = \"digitalocean\"   # Docker Machine is using the 'digitalocean' driver\n    MachineOptions = [\n        \"digitalocean-image=coreos-stable\", # The DigitalOcean system image to use by default\n        \"digitalocean-ssh-user=core\", # The default SSH user\n        \"digitalocean-access-token=DO_ACCESS_TOKEN\", # Access token from Step 7\n        \"digitalocean-region=nyc3\", # The data center to spawn runners in\n        \"digitalocean-size=1gb\", # The size (and price category) of your spawned runners\n        \"digitalocean-private-networking\" # Enable private networking on runners\n    ]\n  [runners.cache]\n    Type = \"s3\"   # The Runner is using a distributed cache with the S3-compatible Spaces service\n    ServerAddress = \"nyc3.spaces.digitaloceanspaces.com\"\n    AccessKey = \"YOUR_SPACES_KEY\"\n    SecretKey = \"YOUR_SPACES_SECRET\"\n    BucketName = \"your_bucket_name\"\n    Insecure = true # We do not have an SSL certificate, as we are only running locally\n```\n\nOnce you’ve added the new lines, customize the access token, region and Droplet size based on your setup. For the purposes of this tutorial, we’ve used the smallest Droplet size of 1GB and created our Droplets in NYC3. Be sure to use the information that is relevant in your case.\n\nYou also need to customize the cache component, and enter your Space’s server address from the infrastructure configuration step, access key, secret key and the name of the Space that you created.\n\nWhen completed, restart GitLab Runner to make sure the configuration is being used:\n\n```bash\nsudo gitlab-runner restart\n```\n\nIf you would like to learn more about all the available options, including off-peak hours, you can read [GitLab’s advanced documentation](https://docs.gitlab.com/runner/configuration/autoscale.html).\n\n## Step 9: Test your GitLab Runner\n\nAt this point, our GitLab Runner bastion Droplet is configured and is able to create DigitalOcean Droplets on demand, as the CI queue fills up. We’ll need to test it to be sure it works by heading to your GitLab instance and the project we imported in Step 1.\n\nTo trigger a build, edit the `readme.md` file by clicking on it, then clicking **edit**, and add any relevant testing text to the file, then click **Commit changes**.\n\nNow a build will be automatically triggered, which can be found under the project’s **CI/CD** option in the left navigation.\n\nOn this page you should see a pipeline entry with the status of **running**. 
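\n\nIf you prefer the command line, you can also watch the autoscaled machines appear and disappear from the bastion Droplet itself. The machines are created by the `gitlab-runner` service running as root, so run the command with `sudo`:\n\n```bash\n# List the runner machines Docker Machine is currently managing on this bastion\nsudo docker-machine ls\n```\n\n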
In your DigitalOcean account, you’ll see a number of Droplets automatically created by GitLab Runner in order to build this change.\n\nCongratulations! Your CI pipeline is cloud scalable and now manages its own resource usage. After the specified idle time, the machines should be automatically destroyed, but we recommend verifying this manually to ensure you aren’t unexpectedly billed.\n\n## Troubleshooting\n\nIn some cases, GitLab may report that the runner is unreachable and, as a result, perform no actions, including deploying new runners. You can troubleshoot this by stopping GitLab Runner, then starting it again in debug mode:\n\n```bash\ngitlab-runner stop\ngitlab-runner --debug start\n```\n\nThe output should include an error, which will be helpful in determining which configuration is causing the issue.\n\nIf your configuration creates too many machines, and you wish to remove them all at the same time, you can run this command to destroy them all:\n\n```bash\ndocker-machine rm $(docker-machine ls -q)\n```\n\nFor more troubleshooting steps and additional configuration options, you can refer to [GitLab’s documentation](https://docs.gitlab.com/runner/).\n\n## Conclusion\n\nYou've successfully set up an automated CI/CD pipeline using GitLab Runner and Docker. From here, you could configure higher levels of caching with Docker Registry to optimize performance or explore the use of tagging code builds to specific GitLab code runners.\n\nFor more on GitLab Runner, [see the detailed documentation](https://docs.gitlab.com/runner/), or to learn more, you can read [GitLab’s series of blog posts](https://docs.gitlab.com/ee/ci/) on how to make the most of your continuous integration pipeline.\n\n[This post was originally published by DigitalOcean](https://www.digitalocean.com/community/tutorials/how-to-autoscale-gitlab-continuous-deployment-with-gitlab-runner-on-digitalocean) and is licensed under [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/).\n{: .note}\n",[832,232],{"slug":7384,"featured":6,"template":678},"autoscale-continuous-deployment-gitlab-runner-digital-ocean","content:en-us:blog:autoscale-continuous-deployment-gitlab-runner-digital-ocean.yml","Autoscale Continuous Deployment Gitlab Runner Digital Ocean","en-us/blog/autoscale-continuous-deployment-gitlab-runner-digital-ocean.yml","en-us/blog/autoscale-continuous-deployment-gitlab-runner-digital-ocean",{"_path":7390,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7391,"content":7397,"config":7403,"_id":7405,"_type":16,"title":7406,"_source":17,"_file":7407,"_stem":7408,"_extension":20},"/en-us/blog/introducing-gitlab-s-integrated-development-environment",{"title":7392,"description":7393,"ogTitle":7392,"ogDescription":7393,"noIndex":6,"ogImage":7394,"ogUrl":7395,"ogSiteName":692,"ogType":693,"canonicalUrls":7395,"schema":7396},"Meet the GitLab Web IDE","Here's how we went from a proof of concept to a new feature that makes it even easier for everyone to edit inside of GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678812/Blog/Hero%20Images/web-ide-cover.jpg","https://about.gitlab.com/blog/introducing-gitlab-s-integrated-development-environment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet the GitLab Web IDE\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dimitrie Hoekstra\"}],\n        \"datePublished\": \"2018-06-15\",\n      
}",{"title":7392,"description":7393,"authors":7398,"heroImage":7394,"date":7400,"body":7401,"category":14,"tags":7402},[7399],"Dimitrie Hoekstra","2018-06-15","\n\nGitLab has been doing much more for the application development workflow than just source code management and versioning for a while – now spanning everything from [portfolio management](https://docs.gitlab.com/ee/user/group/epics/index.html#epics) to the [entire DevOps lifecycle](/blog/from-dev-to-devops/). Having everyone work from and be familiar with the same interface has many advantages.\n\nAll that code that gets automatically tested and deployed to production has a human at its source though. With the speed of innovation in today’s web development, we saw a chance to help out both new as well as seasoned developers with writing, reviewing, and committing that code with more confidence. In [GitLab 10.7](/releases/2018/04/22/gitlab-10-7-released/) we released the first iteration of our Web IDE – here's how it happened.\n\n## From experiment towards product\n\nThe original idea came from staff developer [Jacob Schatz](/company/team/#jakecodes), who observed how non-developers were having a hard time editing multiple files and getting those changes committed.\n\nAlthough having discussed implementing an Integrated Development Environment (IDE) into GitLab with our CEO [Sid](/company/team/#sytses) and VP of Product [Job](/company/team/#Jobvo) before, it was never clear how to do that and what exact problems it would solve.\n\nAt some point, it dawned on us that the repository view might be the right vessel. Jacob set up a proof of concept where he made our file viewer work in the context of a file editor. It removed the page refresh when switching between files and it approached editing from a branch perspective instead of per file. The result was the beginning of the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/), although it was called the \"repo editor\" at that time.\n\n![Proof of concept multi-file editor](https://about.gitlab.com/images/blogimages/webide/multifileeditor.png){: .shadow.medium.center}\n\nSetting up that proof of concept was a [tremendous amount of work](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12198) and was time-boxed to one month. Jacob also had other responsibilities, and there was still a long way to go from concept to minimal viable product (MVP).\n\nProduct, UX, and other developers got involved to see if this could be pushed towards production. The concept solved a problem, but did it align with our vision? How could we holistically integrate this and make it a great experience? How could we get it to perform well for many different users?\n\n## The next phase\n\nIt took some time, but it was clear that we were aiming for a real integrated development experience, accessible for everyone right within the GitLab UI, without anything to install. The idea grew from the \"Repo editor\" into that of the \"Web IDE.\"\n\nGitLab itself is open source (or rather [open core](/blog/gitlab-is-open-core-github-is-closed-source/)) and relies on many open source projects for its development. Jacob had already decided that the [Monaco editor](https://microsoft.github.io/monaco-editor/) was the perfect code editor to integrate. 
It had already proven itself within different contexts, was great for performance, and so could be considered a [boring solution](https://handbook.gitlab.com/handbook/values/#efficiency).\n\nOur UX manager [Sarrah Vesselov](/company/team/#SVesselov) did the initial design for the concept, after which it got passed on to me. It was up to our platform product manager [James Ramsay](/company/team/#jamesramsay), our frontend engineering manager [Tim Zallman](/company/team/#tpmtim), senior frontend engineer [Phil Hughes](/company/team/#iamphill), and me as the UX Designer to redefine the prototype \"multi-file editor\" into the foundation capable of supporting our vision of an Integrated Development Environment with live previews and web terminals, one that enables anyone to contribute.\n\n## Iterating on user experience\n\n### An integrated editor\n\nThe original \"multi-file editor\" was about committing multiple changes at once, because doing this file by file was annoying when updating the handbook or docs. Often those changes touched multiple files. It was a prototype that made it easier for people to contribute.\n\nThe more we thought about this idea, the greater the possibilities became. One of GitLab's unique advantages is being an integrated product. Building an editor that was integrated with GitLab and made it easier for anyone to contribute is a natural fit. However, the prototype's starting point in the file list and blob editor wouldn't have been enough to handle this. Decoupling the editor from those views was the first actionable item.\n\n>One of GitLab's unique advantages is being an integrated product. Building an editor that was integrated with GitLab and made it easier for anyone to contribute is a natural fit.\n\nThis change, which required a lot of discussion and a considerable amount of engineering work by our developers Phil and Tim, was where the project pivoted towards its new direction. The Web IDE got a lot more screen real estate as it no longer had to make room for the project sidebar and other page elements. We decided that the Web IDE would edit only one branch at a time and conceptualized the initial Git flow in the editor. Based on existing UI paradigms and inspired by other code editors like [VSCode](https://code.visualstudio.com/) and [Atom](https://atom.io/), we arrived at the well-known, three-pane layout.\n\n\u003Cdiv class=\"compare-images-2\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-iteration-0-concept.png\" class=\"compare-image-top shadow\" alt=\"multi file editor concept\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-iteration-1-concept.png\" class=\"compare-image-bottom shadow\" alt=\"web ide file editor concept\">\n\u003C/div>\n\nEven seasoned developers were once beginners, and getting new people accustomed to the Git workflow continues to be notoriously hard to tackle. We therefore decided that the core of the Web IDE experience should be stable before we venture into more advanced concepts. We set out to make the \"editing to committing\" experience as good as possible and to create a foundation on which we can expand.\n\nEven while having [these discussions](https://gitlab.com/gitlab-org/gitlab-ce/issues/44316), development never stood still. We quickly had a working version of the Web IDE that relied on the Monaco editor. Our immediate efforts pushed towards getting that to a functional, viable state.\n\n### A review state\n\nDue to the power of the Monaco editor, it became clear we had many options to choose from as to what to develop next. 
A review state was high up on that list, as it should be obvious what you are going to commit. Not only that, it introduced the possibility of an integrated merge request review experience in the context of the editing experience – something that had not been possible before.\n\nThis introduced the problem of managing states. After much discussion, we decided to go for editor states instead of file-specific states. Both the user experience and the technical implementation benefited from this, as it reduced complexity. It meant you were either editing your files or reviewing your changes across the files you had opened.\n\n![Web IDE edit and review states](https://about.gitlab.com/images/blogimages/webide/web-ide-states.png){: .shadow.medium.center}\n\nAt this point, we are nearing the current state of the Web IDE, though in GitLab 10.8 we could finally [realize the \"editing to committing\" experience](https://gitlab.com/gitlab-org/gitlab-ce/issues/44846) that we talked about before and which was conceptualized and [prototyped](https://framer.cloud/Cojmw/index.html) while developing GitLab 10.7. This was made possible as development reached a more stable state.\n\n### Deciding on hierarchy\n\nThe new experience had several objectives. It needed to introduce a more logical hierarchy for the panes to operate in. Based on that, we could decide which panes would potentially show what information, and where we could fit in future, more advanced features.\n\nThe second objective was to guide the user more intuitively from editing to committing. The editing and reviewing experience up until then showed its shortcomings, as it was hard to switch modes and unclear when you were doing a good job. If even seasoned developers had a hard time using it, how could people just starting out ever hope to contribute successfully using it?\n\nJames and I went through many concepts and discussed both flow and hierarchy before getting into detailed mockups. Through the iterations, it became apparent we preferred our hierarchy to act from left to right. We decided we needed a paradigm similar to the activity bar in VSCode. The editor became far more usable as state changes were just one click away, regardless of which state you were already using. As committing was now a separate state as well, it brought a linearity to the entire flow as seen from the activity bar.\n\nThe last significant detail, which came out of a discarded design iteration, was a button to guide the user towards committing their changes. It introduced a little section at the bottom of each state with a blue commit button and a counter so you can see how many changes you have made – essential as we repurposed the right sidebar.\n\n\u003Cdiv class=\"compare-images-3\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-left-1.png\" class=\"compare-image-top shadow\" alt=\"web ide revised concept edit mode\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-left-2.png\" class=\"compare-image-middle shadow\" alt=\"web ide revised concept review mode\">\n  \u003Cimg src=\"/images/blogimages/webide/web-ide-left-3.png\" class=\"compare-image-bottom shadow\" alt=\"web ide revised concept commit mode\">\n\u003C/div>\n\n*Interested in seeing all the iterations the concepts have gone through? 
Check out my [Web IDE directory](https://gitlab.com/gitlab-org/gitlab-design/tree/master/progress/dimitrie/web-ide) in GitLab's open source design library where we contribute all our design files!*\n\n## Just the beginning\n\nThe current state of the Web IDE is still only the beginning. We are planning for an even better experience in the future: one where we can integrate and support more advanced features, such as a live environment to test your code against and code review discussions which are directly resolvable.\n\nIn GitLab 11.0, shipping next Friday, we will already have the following improvements: you will be able to view the latest pipeline status and the job logs directly in context, and you will be able to quickly switch between both assigned and authored merge requests without leaving the Web IDE!\n\nThis and more will inevitably lead to more interesting design decisions. Some of these concepts are uncharted territory and are sure to be valuable to further speed up development and give developers more confidence. Our hope is that this is a valuable contribution to both the open source community and GitLab itself.\n\nDo you have great ideas to push this effort forwards or want to contribute yourself? Check out the [issue tracker](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=web%20ide)!\n",[915,1979,1144],{"slug":7404,"featured":6,"template":678},"introducing-gitlab-s-integrated-development-environment","content:en-us:blog:introducing-gitlab-s-integrated-development-environment.yml","Introducing Gitlab S Integrated Development Environment","en-us/blog/introducing-gitlab-s-integrated-development-environment.yml","en-us/blog/introducing-gitlab-s-integrated-development-environment",{"_path":7410,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7411,"content":7417,"config":7423,"_id":7425,"_type":16,"title":7426,"_source":17,"_file":7427,"_stem":7428,"_extension":20},"/en-us/blog/keeping-git-commit-history-clean",{"title":7412,"description":7413,"ogTitle":7412,"ogDescription":7413,"noIndex":6,"ogImage":7414,"ogUrl":7415,"ogSiteName":692,"ogType":693,"canonicalUrls":7415,"schema":7416},"How (and why!) to keep your Git commit history clean","Git commit history is very easy to mess up, here's how you can fix it!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659457/Blog/Hero%20Images/keep-git-commit-history-clean.jpg","https://about.gitlab.com/blog/keeping-git-commit-history-clean","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How (and why!) to keep your Git commit history clean\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kushal Pandya\"}],\n        \"datePublished\": \"2018-06-07\",\n      }",{"title":7412,"description":7413,"authors":7418,"heroImage":7414,"date":7420,"body":7421,"category":14,"tags":7422},[7419],"Kushal Pandya","2018-06-07","\n\nGit commits are one of the key parts of a [Git repository](/solutions/source-code-management/), and, even more so, the _commit message_ is a life log for the repository. As the project/repository evolves over time (new features getting added, bugs being fixed, architecture being refactored), commit messages are the place where one can see what was changed and how. So it's important that these messages reflect the underlying change in a short, precise manner.\n\n## Why a meaningful Git commit history is important\n\nWhat does Git commit do? 
Git commit messages are the fingerprints that you leave on the code you touch. When you look at a change a year after committing it, you will be thankful for the clear, meaningful commit message that you wrote, and it will also make the lives of your fellow developers easier. When Git commits are isolated based on context, a bug introduced by a single commit becomes quicker to find, and it is easier to revert the commit that caused the bug in the first place.\n\nWhile working on a large project, we often deal with a lot of moving parts that are updated, added or removed. Ensuring that commit messages are maintained in such cases can be tricky, especially when development spans across days, weeks, or even months. So to simplify the effort of maintaining a concise commit history, this article will walk through some of the common situations that a developer might face while working on a Git repository.\n\n- [Situation 1: I need to change the most recent commit](#situation-1-i-need-to-change-the-most-recent-commit)\n- [Situation 2: I need to change a specific commit](#situation-2-i-need-to-change-a-specific-commit)\n- [Situation 3: I need to add, remove, or combine commits](#situation-3-i-need-to-add-remove-or-combine-commits)\n- [Situation 4: My commit history doesn't make sense, I need a fresh start!](#situation-4-my-commit-history-doesnt-make-sense-i-need-a-fresh-start)\n\nBut before we dive in, let's quickly go through what a typical development workflow looks like in our hypothetical Ruby application.\n\n**Note:** This article assumes that you are aware of the basics of Git: how branches work, how to stage uncommitted changes in a branch, and how to commit the changes. If you're unsure of these flows, [our documentation](https://docs.gitlab.com/ee/topics/git/index.html) is a great starting point.\n\n## A day in the life\n\nHere, we are working on a small Ruby on Rails project where we need to add a navigation view to the homepage, which involves updating and adding several files. Following is a step-by-step breakdown of the entire flow:\n\n- You start working on a feature by updating a single file; let's call it `application_controller.rb`\n- This feature requires you to also update a view: `index.html.haml`\n- You add a partial which is used on the index page: `_navigation.html.haml`\n- Styles for the page also need to be updated to reflect the partial we added: `styles.css.scss`\n- The feature is now ready with the desired changes; time to also update the tests. The files to be updated are as follows:\n  - `application_controller_spec.rb`\n  - `navigation_spec.rb`\n- Tests are updated and passing as expected; now it's time to commit the changes!\n\nSince all the files belong to different territories of the architecture, we commit the changes isolated from each other to ensure that each commit represents a certain context and is made in a certain order. I usually prefer a backend -> frontend order, where the most backend-centric change is committed first, followed by the middle layer and then by frontend-centric changes in the Git commit list.\n\n1.  `application_controller.rb` & `application_controller_spec.rb`; **Add routes for navigation**.\n2.  `_navigation.html.haml` &  `navigation_spec.rb`; **Page Navigation View**.\n3.  `index.html.haml`; **Render navigation partial**.\n4.  `styles.css.scss`; **Add styles for navigation**.\n\nNow that we have our changes committed, we create a merge request with the branch. 
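\n\nAs a rough sketch, that sequence of isolated commits could look like the following on the command line (file paths are shortened here; in a real Rails project these files would live under their respective `app/` and `spec/` directories):\n\n```\n# 1. Backend change, committed together with its spec\ngit add application_controller.rb application_controller_spec.rb\ngit commit -m \"Add routes for navigation\"\n\n# 2. The new partial and its spec\ngit add _navigation.html.haml navigation_spec.rb\ngit commit -m \"Page Navigation View\"\n\n# 3. Render the partial on the index page\ngit add index.html.haml\ngit commit -m \"Render navigation partial\"\n\n# 4. Styles for the new navigation\ngit add styles.css.scss\ngit commit -m \"Add styles for navigation\"\n```\n\n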
Once you have the merge request open, it typically gets reviewed by your peers before the changes are merged into the repo's `master` branch. Now let's look at the different situations we may end up in during code review.\n\n## Situation 1: How to change the most recent Git commit\n\nImagine a case where the reviewer looked at `styles.css.scss` and suggested a change. In such a case, it is very simple to make the change, as the stylesheet changes are part of the **last** commit on your branch. Here's how we can handle this:\n\n- You make the necessary changes directly to `styles.css.scss` in your current branch.\n- Once you're done with the changes, add these changes to the stage; run `git add styles.css.scss`.\n- Once changes are staged, we need to _add_ these changes to our last commit; run `git commit --amend`.\n  -  **Command breakdown**: Here, we're asking the `git commit` command to _amend_ whatever changes are present in the stage to the most recent commit.\n- This will open your last commit in your Git-defined text editor, which shows the commit message **Add styles for navigation**.\n- Since we only updated the CSS declaration, we don't need to alter the commit message. At this point, you can just save and exit the text editor that Git opened for you and your changes will be reflected in the commit.\n\nSince you modified an existing Git commit, these changes need to be _force pushed_ to your remote repo using `git push --force-with-lease \u003Cremote_name> \u003Cbranch_name>`. This command will override the commit `Add styles for navigation` on the remote repo with the updated commit that we just made in our local repo.\n\nOne thing to keep in mind while force pushing branches is that if you are working on the same branch with multiple people, force pushing may cause trouble for other users when they try to push their changes normally to a remote branch that has had new commits force pushed. Hence, use this feature wisely. You can learn more about Git force push options [here](https://git-scm.com/docs/git-push#git-push---no-force-with-lease).\n\n## Situation 2: How to change a specific Git commit\n\nIn the previous situation, the Git commit change was rather simple, as we had to modify only our last Git commit, but imagine if the reviewer suggested changing something in `_navigation.html.haml`. In this case, it is the second commit from the top, so changing it won't be as direct as it was in the first situation. Let's see how we can handle this:\n\nWhenever a commit is made in a branch, it is identified by a unique SHA-1 hash string. Think of it as a unique ID that separates one commit from another. You can view all the previous commits, along with their SHA-1 hashes in a branch by running the `git log` command. 
With this, you would see an output that looks somewhat as follows and is a list of commits, where the most recent commits are at the top:\n\n```\ncommit aa0a35a867ed2094da60042062e8f3d6000e3952 (HEAD -> add-page-navigation)\nAuthor: Kushal Pandya \u003Ckushal@gitlab.com>\nDate: Wed May 2 15:24:02 2018 +0530\n\n    Add styles for navigation\n\ncommit c22a3fa0c5cdc175f2b8232b9704079d27c619d0\nAuthor: Kushal Pandya \u003Ckushal@gitlab.com>\nDate: Wed May 2 08:42:52 2018 +0000\n\n    Render navigation partial\n\ncommit 4155df1cdc7be01c98b0773497ff65c22ba1549f\nAuthor: Kushal Pandya \u003Ckushal@gitlab.com>\nDate: Wed May 2 08:42:51 2018 +0000\n\n    Page Navigation View\n\ncommit 8d74af102941aa0b51e1a35b8ad731284e4b5a20\nAuthor: Kushal Pandya \u003Ckushal@gitlab.com>\nDate: Wed May 2 08:12:20 2018 +0000\n\n    Add routes for navigation\n```\n\nThis is where the `git rebase` command comes into play. Whenever we wish to edit a specific commit with `git rebase`, we need to first rebase our branch by moving HEAD back to the point right _before_ the commit we wish to edit. In our case, we need to change the commit that reads `Page Navigation View`.\n\n![Commit Log](https://about.gitlab.com/images/blogimages/keeping-git-commit-history-clean/GitRebase.png){: .shadow.center.medium}\n\nHere, notice the hash of the commit right before the commit we want to modify; copy the hash and perform the following steps:\n\n- Rebase the branch to move to the commit before our target commit; run `git rebase -i 8d74af102941aa0b51e1a35b8ad731284e4b5a20`\n  -  **Git command breakdown**: Here we're running Git's `rebase` command in _interactive_ mode, with the provided SHA-1 hash as the commit to rebase onto.\n- This will run Git's rebase command in interactive mode and will open your text editor, showing all of your commits that came _after_ the commit you rebased to. It will look somewhat like this:\n\n```\npick 4155df1cdc7 Page Navigation View\npick c22a3fa0c5c Render navigation partial\npick aa0a35a867e Add styles for navigation\n\n# Rebase 8d74af10294..aa0a35a867e onto 8d74af10294 (3 commands)\n#\n# Commands:\n# p, pick = use commit\n# r, reword = use commit, but edit the commit message\n# e, edit = use commit, but stop for amending\n# s, squash = use commit, but meld into previous commit\n# f, fixup = like \"squash\", but discard this commit's log message\n# x, exec = run command (the rest of the line) using shell\n# d, drop = remove commit\n#\n# These lines can be re-ordered; they are executed from top to bottom.\n#\n# If you remove a line here THAT COMMIT WILL BE LOST.\n#\n# However, if you remove everything, the rebase will be aborted.\n#\n# Note that empty commits are commented out\n```\n\nNotice how each commit has the word `pick` in front of it, and in the contents below, there are all the possible keywords we can use. Since we want to _edit_ a commit, we need to change `pick 4155df1cdc7 Page Navigation View` to `edit 4155df1cdc7 Page Navigation View`. Save the changes and exit the editor.\n\nNow your branch is rebased to the point in time right before the commit you made which included `_navigation.html.haml`. Open the file and perform the desired changes per the review feedback. 
Once you're done with the changes, stage them by running `git add _navigation.html.haml`.\n\nSince we have staged the changes, it is time to move branch HEAD back to the commit we originally had (while also including the new changes we added), run `git rebase --continue`, this will open your default editor in the terminal and show you the commit message that we edited during rebase; `Page Navigation View`. You can change this message if you wish, but we would leave it as it is for now, so save and exit the editor. At this point, Git will replay all the commits that followed after the commit you just edited and now branch `HEAD` is back to the top commit we originally had, and it also includes the new changes you made to one of the commits.\n\nSince we again modified a commit that's already present in remote repo, we need force push this branch again using `git push --force-with-lease \u003Cremote_name> \u003Cbranch_name>`.\n\n## Situation 3: How to add, remove, or combine Git commits\n\nA common situation is when you've made several commits just to fix something previously committed. Now let's reduce them as much as we can, combining them with the original commits.\n\nAll you need to do is start the interactive rebase as you would in the other scenarios.\n\n```\npick 4155df1cdc7 Page Navigation View\npick c22a3fa0c5c Render navigation partial\npick aa0a35a867e Add styles for navigation\npick 62e858a322 Fix a typo\npick 5c25eb48c8 Ops another fix\npick 7f0718efe9 Fix 2\npick f0ffc19ef7 Argh Another fix!\n```\n\nNow imagine you want to combine all those fixes into `c22a3fa0c5c Render navigation partial`. You just need to:\n\n1. Move the fixes up so that they are right below the commit you want to keep in the end.\n2. Change `pick` to `squash` or `fixup` for each of the fixes.\n\n*Note:* `squash` keeps the git fix commit messages in the description. `fixup` will forget the commit messages of the fixes and keep the original.\n\nYou'll end up with something like this:\n\n```\npick 4155df1cdc7 Page Navigation View\npick c22a3fa0c5c Render navigation partial\nfixup 62e858a322 Fix a typo\nfixup 5c25eb48c8 Ops another fix\nfixup 7f0718efe9 Fix 2\nfixup f0ffc19ef7 Argh Another fix!\npick aa0a35a867e Add styles for navigation\n```\n\nSave the changes, exit the editor, and you're done! This is the resulting history:\n\n```\npick 4155df1cdc7 Page Navigation View\npick 96373c0bcf Render navigation partial\npick aa0a35a867e Add styles for navigation\n```\n\nAs before, all you need to do now is `git push --force-with-lease \u003Cremote_name> \u003Cbranch_name>` and the changes are up.\n\nIf you want to remove a Git commit from branch altogether, instead of `squash` or `fixup`, just write `drop` or simply delete that line.\n\n### How to avoid Git commit conflicts\n\nTo avoid conflicts, make sure the commits you're moving up the timeline aren't touching the same files touched by the commits left after them.\n\n```\npick 4155df1cdc7 Page Navigation View\npick c22a3fa0c5c Render navigation partial\nfixup 62e858a322 Fix a typo                 # this changes styles.css\nfixup 5c25eb48c8 Ops another fix            # this changes image/logo.svg\nfixup 7f0718efe9 Fix 2                      # this changes styles.css\nfixup f0ffc19ef7 Argh Another fix!          
### Pro-tip: Quick Git commit `fixup`s\n\nIf you know exactly which commit you want to fix up, you don't have to waste brain cycles when committing thinking of good temporary names like \"Fix 1\", \"Fix 2\", ..., \"Fix 42\".\n\n**Step 1: Meet `--fixup`**\n\nAfter you've staged the changes fixing whatever it is that needs fixing, just commit the changes like this:\n\n```\ngit commit --fixup c22a3fa0c5c\n```\n(Note that this is the hash of the commit `c22a3fa0c5c Render navigation partial`.)\n\nThis generates the commit message `fixup! Render navigation partial`.\n\n**Step 2: And the sidekick `--autosquash`**\n\nInteractive rebase, made easy: you can have Git place the `fixup`s in the right spot automatically.\n\n`git rebase -i 4155df1cdc7 --autosquash`\n\nHistory will be shown like so:\n```\npick 4155df1cdc7 Page Navigation View\npick c22a3fa0c5c Render navigation partial\nfixup 62e858a322 Fix a typo\nfixup 5c25eb48c8 Ops another fix\nfixup 7f0718efe9 Fix 2\nfixup f0ffc19ef7 Argh Another fix!\npick aa0a35a867e Add styles for navigation\n```\n\nReady for you to just review and proceed.\n\nIf you're feeling adventurous, you can do a non-interactive rebase with `git rebase --autosquash`, but only if you like living dangerously, as you'll have no opportunity to review the squashes before they're applied.\n\n## Situation 4: My Git commit history doesn't make sense, I need a fresh start!\n\nWhen working on a large feature, it is common to have several fixup and review-feedback changes committed frequently. Instead of constantly rebasing the branch, we can leave the cleanup of Git commits until the end of development.\n\nThis is where creating patch files is extremely handy. In fact, patch files were the primary way of sharing code over email when collaborating on large open source projects before Git-based services like GitLab were available to developers. Imagine you have one such branch (e.g. `add-page-navigation`) with tons of commits that don't convey the underlying changes clearly. Here's how you can create a patch file for all the changes you made in this branch (see the consolidated sketch after this list):\n\n- The first step is to make sure that your branch has all the changes from the `master` branch and has no conflicts with it.\n- You can run `git rebase master` or `git merge master` while `add-page-navigation` is checked out to get all the changes from `master` onto your branch.\n- Now create the patch file; run `git diff master add-page-navigation > ~/add_page_navigation.patch`.\n  -  **Command breakdown**: Here we're using Git's _diff_ feature, asking for a diff between the `master` and `add-page-navigation` branches, and _redirecting_ the output (via the `>` symbol) to a file named `add_page_navigation.patch` in our user home directory (typically `~/` on *nix operating systems).\n- You can keep this file at any path you wish, and the file name and extension can be anything you want.\n- Once the command has run and you don't see any errors, the patch file is generated.\n- Now check out the `master` branch; run `git checkout master`.\n- Delete the branch `add-page-navigation` from the local repo; run `git branch -D add-page-navigation`. Remember, we already have this branch's changes in the patch file we created.\n- Now create a new branch with the same name (while `master` is checked out); run `git checkout -b add-page-navigation`.\n- At this point, this is a fresh branch without any of your changes.\n- Finally, apply your changes from the patch file; run `git apply ~/add_page_navigation.patch`.\n- All of your changes are now applied to the branch and appear as uncommitted, as if you had just made all the modifications without committing any of them.\n- Now you can go ahead and commit individual files, or files grouped by area of impact, in the order you want, with concise commit messages.\n\nAs with the previous situations, we basically modified the whole branch, so it is time to force push!\n\n
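Put together, the whole patch-file dance condenses to a handful of commands (a sketch assuming the branch and file names used above; adjust the paths to taste):\n\n```shell\ngit checkout add-page-navigation\ngit rebase master                  # or: git merge master\ngit diff master add-page-navigation > ~/add_page_navigation.patch\ngit checkout master\ngit branch -D add-page-navigation  # safe: the patch file has the changes\ngit checkout -b add-page-navigation\ngit apply ~/add_page_navigation.patch\n# ...stage and commit files in logical groups, then force push...\n```\n\n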
## Git commit history: Conclusion\n\nWhile we have covered the most common and basic situations that arise in a day-to-day workflow with Git, rewriting Git history is a vast topic, and as you get familiar with the tips above, you can learn more advanced concepts around the subject in the [Git Official Documentation](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History). Happy git'ing!\n\nPhoto by [pan xiaozhen](https://unsplash.com/photos/pj-BrFZ9eAA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/clean?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[702,727],{"slug":7424,"featured":6,"template":678},"keeping-git-commit-history-clean","content:en-us:blog:keeping-git-commit-history-clean.yml","Keeping Git Commit History Clean","en-us/blog/keeping-git-commit-history-clean.yml","en-us/blog/keeping-git-commit-history-clean",{"_path":7430,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7431,"content":7437,"config":7442,"_id":7444,"_type":16,"title":7445,"_source":17,"_file":7446,"_stem":7447,"_extension":20},"/en-us/blog/journey-in-native-unicode-emoji",{"title":7432,"description":7433,"ogTitle":7432,"ogDescription":7433,"noIndex":6,"ogImage":7434,"ogUrl":7435,"ogSiteName":692,"ogType":693,"canonicalUrls":7435,"schema":7436},"Our journey in switching to native Unicode emoji","Unicode is hard. Here's a guide to getting native Unicode Emoji right 👌. Learn more!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672573/Blog/Hero%20Images/journey-in-native-unicode-emoji-cover.png","https://about.gitlab.com/blog/journey-in-native-unicode-emoji","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Our journey in switching to native Unicode emoji\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Eastwood\"}],\n        \"datePublished\": \"2018-05-30\",\n      }",{"title":7432,"description":7433,"authors":7438,"heroImage":7434,"date":7439,"body":7440,"category":14,"tags":7441},[5753],"2018-05-30","The switch from image-based emoji to native Unicode wasn't a straightforward journey and involved many intricacies to get production ready. Support varies widely on each OS, and even between the browsers on the same OS. We also wanted to fall back to image-based emoji in environments that do not support everything yet; otherwise, people would see black squares (□). As a simple example, most Linux environments do not have Unicode emoji support unless you manually install a font.\n\n
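For instance, on Debian/Ubuntu-based systems a color emoji font is one package away (one possible example; the package name varies by distribution):\n\n```shell\n# install Noto Color Emoji and rebuild the font cache\nsudo apt-get install fonts-noto-color-emoji\nfc-cache -f\n```\n\n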
I consider this blog post the survival manual I wish I had had when implementing native Unicode emoji myself.\n\n## What is Unicode emoji?\n\nUnicode is a universal character encoding standard maintained by the [Unicode Consortium](https://home.unicode.org/basic-info/overview/), and it provides the basis for processing, storing, and interchanging text data in any language. As far as emoji are concerned, this is the encoding system under which all emoji are developed and housed. Emoji are encoded in the Unicode system based on appearance rather than a specific semantic meaning.\n\n## Are Unicode emojis compatible with all devices?\n\nThe short answer is yes!\n\nAll modern software providers have become compatible with Unicode so that data can be transferred freely without corruption, regardless of platform, language, or device.\n\nBefore Unicode, there were multiple character encoding systems for assigning numbers to the letters and numbers used by computers. These character encoding systems simply couldn’t keep up with the volume of languages using different letters and numbers, and data passing through these different encodings ran the risk of being corrupted due to a lack of sufficient support on a given computer, particularly servers.\n\nAnd so, a new system was born: Unicode.\n\n## Why move to native Unicode emoji?\n\nWe decided to switch to Unicode emoji because it was in line with our decision to use system fonts, and it reduces the number of images loaded on a page. You can see the [full discussion in this issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/26371). We were also [interested in](https://gitlab.com/gitlab-org/gitlab-ce/issues/22474) [improving](https://gitlab.com/gitlab-org/gitlab-ce/issues/27250) the award emoji menu (emoji reaction selector) performance, so it would open quickly without an AJAX request and with less janky scrolling.\n\nThe first step was to find a way to detect whether a given Unicode emoji is supported. Since new emoji/characters are introduced in new versions/releases of the Unicode specification from the [Unicode Consortium](http://unicode.org/), we can consider every emoji in a given version supported if a single emoji from that version tests positively. There are exceptions to assuming support for a whole Unicode version, but we can handle them individually as they come up. Unicode 10 is the current stable release, but the [Unicode Consortium](http://unicode.org/) is finishing up Unicode 11 and starting on Unicode 12 at the moment. The Unicode Consortium has [a full table of emoji here with their representation on various platforms](https://unicode.org/emoji/charts/full-emoji-list.html).\n\n## Testing for native emoji Unicode support\n\nWe test an emoji from each Unicode version/release and cache the result locally ([`localStorage`](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage)) in a support map, to look up later whether a given emoji is supported. If the emoji isn't supported, we fall back to an image or a CSS sprite, depending on the situation.\n\n
I couldn't find any existing library or JSON document that mapped each emoji to its respective Unicode version/release, so I created my own project that scrapes [emojipedia](https://emojipedia.org/) and assembles a JSON map: [`emoji-unicode-version`](https://www.npmjs.com/package/emoji-unicode-version) on npm.\n\nTo test whether a Unicode emoji works, we render it to a `\u003Ccanvas>` and inspect the pixels in the exact middle for any color (if it is black, the test fails). We also have to ensure the emoji renders as a single character, because some emoji are made up of multiple characters (see the [ZWJ sequences and skin tone modifier sections below](#emoji-made-up-of-multiple-characters)).\n\nWhen choosing a specific emoji to test for each version, be sure to choose something with color. As an example, I initially chose ⚽ `:soccer:` in the Unicode 5.2 range, but since it is a black and white emoji, it always failed, so I switched to ⛵ `:sailboat:`.\n\nWe invalidate the support map whenever your user agent changes, because emoji support changes when you get a browser or OS update. We also add a manual `GL_EMOJI_VERSION` for busting the cache when we update the support-check logic.\n\nYou can check out our implementation here: [`app/assets/javascripts/emoji/support/unicode_support_map.js`](https://gitlab.com/gitlab-org/gitlab-ce/blob/ee189fd511e1a2c06f05e0d40e1d0b8875151391/app/assets/javascripts/emoji/support/unicode_support_map.js), [`app/assets/javascripts/emoji/support/is_emoji_unicode_supported.js`](https://gitlab.com/gitlab-org/gitlab-ce/blob/ee189fd511e1a2c06f05e0d40e1d0b8875151391/app/assets/javascripts/emoji/support/is_emoji_unicode_supported.js)\n\n### Rendering emoji to a canvas in Internet Explorer gotchas\n\nWhen rendering emoji to a `\u003Ccanvas>`, IE11 didn't like our full font-stack and rendered small black and white emoji, which is less than ideal.\n\nThe culprit is the `-apple-system` piece 😕\n\n```js\nctx.font = `${fontSize}px -apple-system, BlinkMacSystemFont, \"Segoe UI\", Roboto, Oxygen-Sans, Ubuntu, Cantarell, \"Helvetica Neue\", sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\"`;\n```\n\nBut if you simply go with the emoji part of the stack, it renders the nice colorful emoji as expected:\n\n```js\nctx.font = `${fontSize}px \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\"`;\n```\n\nFull font-stack | Small emoji font-stack\n--- | ---\n[![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/ie-canvas-full-font-stack.png)](/images/blogimages/journey-in-native-unicode-emoji/ie-canvas-full-font-stack-large.png) | [![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/ie-canvas-short-font-stack.png)](/images/blogimages/journey-in-native-unicode-emoji/ie-canvas-short-font-stack-large.png)\n\n### Unicode 1.1 emoji not rendering as colorful, fancy glyphs when using full font-stack\n\nWe also switched to using a shorter, emoji-only font-stack in CSS to get some of the Unicode 1.1 emoji to render colorfully. Read [more in the issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/29557#note_25544684).\n\n
Full font-stack | Small emoji font-stack\n--- | ---\n![font-family: -apple-system, BlinkMacSystemFont, \"Segoe UI\", Roboto, Oxygen-Sans, Ubuntu, Cantarell, \"Helvetica Neue\", sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\";](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/unicode-1-1-full-font-stack.png) | ![font-family: \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\";](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/unicode-1-1-short-font-stack.png)\n\n### Render emoji on Canvas at 16px\n\nWe use a `16px` font size when rendering to the `\u003Ccanvas>` because mobile Safari (iOS 9.3) will always render at 16px regardless of the font size you specify.\n\nThe `32px` example below renders at the same size as the `16px` example; if it worked correctly, the `32px` emoji would fill up the empty space.\n\n32px | 16px\n--- | ---\n\u003Ca href=\"/images/blogimages/journey-in-native-unicode-emoji/16px-font-size-at-32px-large.png\">\u003Cimg srcset=\"/images/blogimages/journey-in-native-unicode-emoji/16px-font-size-at-32px.png 2x\">\u003C/a> | \u003Ca href=\"/images/blogimages/journey-in-native-unicode-emoji/16px-font-size-at-16px-large.png\">\u003Cimg srcset=\"/images/blogimages/journey-in-native-unicode-emoji/16px-font-size-at-16px.png 2x\">\u003C/a>\n\n## Unicode emoji bounds and baseline positioning differ across platforms\n\nAnother issue we ran into when switching was the inconsistency in how emoji align vertically across platforms. The baseline defined in each platform's font is different, so tweaks that center emoji on one platform throw them off on another. We didn't find any good solution for perfect vertical centering and opted to just leave it for now. You can read the [full discussion here](https://gitlab.com/gitlab-org/gitlab-ce/issues/33044#note_34375144).\n\nFor an in-depth dive into font metrics (not emoji specific), see [*Deep dive CSS: font metrics, line-height and vertical-align*](http://iamvdo.me/en/blog/css-font-metrics-line-height-and-vertical-align) by Vincent De Oliveira (aka iamvdo).\n\n## Emoji fallbacks\n\nWe define optional fallbacks for images and CSS sprites directly on the element. In terms of priority, when `data-fallback-css-class` is defined on the emoji element, we opt to use the CSS sprite. We only sprite things like the award emoji menu, which lists every emoji at once and potentially needs to fall back on everything for platforms that don't support Unicode emoji (like Linux).\n\n```html\n\u003Cgl-emoji data-fallback-src=\"emoji-xxx.png\" data-fallback-css-class=\"emoji-xxx\">\n  xxx\n\u003C/gl-emoji>\n```\n\nWe use [`document.registerElement()`](https://developer.mozilla.org/en-US/docs/Web/API/Document/registerElement) in order to hook in whenever a `\u003Cgl-emoji>` element is created or used on the page, and test whether we need to fall back. We use the deprecated v0 web components `document.registerElement()` over the new v1 [`CustomElementRegistry.define()`](https://developer.mozilla.org/en-US/docs/Web/API/CustomElementRegistry/define) because the latter only works with ES2015 class syntax, and in our case Babel is transpiling everything, which makes that syntax incompatible for now. It is also necessary to use a [`document.registerElement()` polyfill](https://github.com/WebReflection/document-register-element) for browsers that don't support it, like Safari.\n\n
When we fall back to a CSS sprite, we add the necessary `.emoji-icon` classes to the `\u003Cgl-emoji>` tag. These extra CSS classes hide the emoji Unicode content inside, so only the background image is visible.\n\n```css\n.emoji-icon {\n  /* Hide emoji Unicode */\n  color: transparent;\n  /* Hide emoji Unicode in IE */\n  text-indent: -99em;\n  /* ... */\n}\n```\n\nYou can check out our [`\u003Cgl-emoji>` implementation here](https://gitlab.com/gitlab-org/gitlab-ce/blob/ee189fd511e1a2c06f05e0d40e1d0b8875151391/app/assets/javascripts/behaviors/gl_emoji.js).\n\n## Emoji made up of multiple characters\n\nSome emoji are composed of multiple characters, which can be tricky to work with in JavaScript. [`Array.from`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/from) and [`String.prototype.codePointAt()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/codePointAt) are your friends here. There is a great article, [*JavaScript has a Unicode problem*](https://mathiasbynens.be/notes/javascript-unicode) by Mathias Bynens, that goes into more detail.\n\n#### Zero Width Joiner (ZWJ) sequences\n\nZero Width Joiner (ZWJ) sequences are composed of multiple emoji characters joined by the ZWJ character `\u{200D}`, `&zwj;` (a non-printing character). You can read more about [ZWJ sequences here](http://emojipedia.org/emoji-zwj-sequences/).\n\n👨‍👩‍👧‍👦 `:family_mwgb:`\n```\n[...'👨‍👩‍👧‍👦']\n// [\"👨\", \"‍\", \"👩\", \"‍\", \"👧\", \"‍\", \"👦\"]\n```\n\n#### Skin tone modifier\n\nSkin tone modifiers don't need a ZWJ character to combine with another emoji. You can read more about [skin tone modifiers here](http://emojipedia.org/modifiers/).\n\n👨🏿 `:man_tone5:`\n```\n[...'👨🏿']\n// [\"👨\", \"🏿\"]\n```\n\nI opted to test multiple skin tone modifier combos and consider skin tone modifiers supported, at least on a basic level, only if all of them pass. There was still an outlier on macOS 10.12, which doesn't have 🏇🏿 `:horse_racing_toneX:`, so I added a separate test for it.\n\n
## Emoji discrepancies\n\n### Flag emoji\n\nOn Windows, all `:flag_xx:` emoji are rendered as two-letter international characters instead of colorful flags as in the Apple ecosystem.\n\n![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/flag-emoji-windows.png)\n\nOn Android 6, unknown flags are rendered as two-letter international characters.\n\n\u003Cimg srcset=\"/images/blogimages/journey-in-native-unicode-emoji/flag-emoji-android-6.png 2x\">\n\nOn Android 7, unknown flags are rendered as white flags with blue question marks on them.\n\n\u003Cimg srcset=\"/images/blogimages/journey-in-native-unicode-emoji/flag-emoji-android-7.png 2x\">\n\n### Keycap emoji on Windows\n\nKeycap (digit) emoji are a bit broken on Windows but appear to be fixed in Chrome 57+: 3️⃣4️⃣5️⃣\n\nBrowser | result\n--- | ---\nChrome 55.0.2883.87 (64-bit) ❌ | ![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/keycap-chrome-55.0.2883.87.png)\nChrome 56.0.2924.87 (64-bit) ❌ | ![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/keycap-chrome-56.0.2924.87.png)\nChromium 57.0.2984.0 (64-bit) ✅ | ![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/keycap-chrome-57.0.2984.0.png)\nChrome 58.0.2999.4 (Official Build) canary (64-bit) ✅ | ![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/keycap-chrome-58.0.2999.4.png)\n\n### Skin tone splitting from base emoji when width constrained\n\nStarting in Chrome 60+ (maybe 59.1+), the [🤼🏿 `:wrestlers_toneX:` and 🤝🏿 `:handshake_toneX:` emoji started splitting/breaking into separate pieces](https://gitlab.com/gitlab-org/gitlab-ce/issues/37654) (base emoji and skin tone) when their container is width constrained, causing overflow/wrapping.\n\nI created a [bug report on the Chromium tracker](https://bugs.chromium.org/p/chromium/issues/detail?id=764859), but it was closed as \"WontFix\" because the `wrestlers` and `handshake` emoji are no longer \"classified as Emoji_Base\" in the new International Components for Unicode (ICU) data used in Chrome.\n\nIt's understandable that those emoji were re-classified, but they should then display as two separate characters in all scenarios. The 🤼🏿 `:wrestlers_toneX:` emoji is consistently two characters now, but 🤝🏿 `:handshake_toneX:` still only splits when width constrained, which is pretty sketchy.\n\nCheck the comparison in these screenshots (or the [demo for Chrome prior to 59.1](https://codepen.io/MadLittleMods/pen/dZMeXN)):\n\nWindows 10 | macOS\n--- | ---\n![](https://about.gitlab.com/images/blogimages/journey-in-native-unicode-emoji/skin-tone-splitting-windows-10.png) | \u003Cimg srcset=\"/images/blogimages/journey-in-native-unicode-emoji/skin-tone-splitting-macos.png 2x\">\n\n## Colliding with the object prototype `watch` function\n\nI ran into a small gotcha where some code was looking in an object map for the `watch` ⌚ key. In Firefox, it was pulling in [`Object.prototype.watch()`](https://developer.mozilla.org/en-US/docs/Archive/Web/JavaScript/Object.watch) and causing havoc.\n\n
```js\nconst emojiAliases = { foo: 'bar' };\n\n// Expected `undefined`, but got a function\nemojiAliases['watch']\n```\n\nI fixed this code up by using the safe lookup [`Object.prototype.hasOwnProperty`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/hasOwnProperty):\n\n```js\nconst emojiAliases = { foo: 'bar' };\n\nObject.prototype.hasOwnProperty.call(emojiAliases, 'watch')\n```\n\n[`Object.prototype.watch()`](https://developer.mozilla.org/en-US/docs/Archive/Web/JavaScript/Object.watch) was removed in Firefox 58, and the current stable release is Firefox 59.0.2, so you probably won't run into this yourself. But it's still advisable to use `Object.prototype.hasOwnProperty()` to guard against any current/future collisions.\n\n## Things to improve\n\n### Custom emoji\n\nWe are working on adding custom emoji (with animated GIF support). You can track [this issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/13931) and see our [current iteration here](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/14609). It's not merged yet because we need to ensure it works with [Geo replication](https://docs.gitlab.com/ee/administration/geo/index.html).\n\n### Server-side rendered fallbacks\n\nTo speed up time to visible emoji (TTVE™ 😉) for people who have to fall back to image-based emoji, we could server-side render the fallback straight away.\n\nIn order to detect support from the server, on the first page visit we could set a cookie client-side (frontend JavaScript land) based on the Unicode support map. Cookies are sent with each request and could be read on the server.\n\nWe have several layers of caching on our Markdown rendering, which makes this a bit difficult, as we would need a cached response for both the `true` and `false` emoji-support cases. Alternatively, we could post-process every request and update the rendered Markdown HTML accordingly.\n\n### SVG fallbacks\n\nUsing the [EmojiOne SVG](https://github.com/emojione/emojione/tree/2.2.7/assets/svg) fallbacks would be a nice step up from the current `.png` images. This would save on bandwidth and we would get nice, crisp fallback emoji.\n\nWe could even take it a step further and extract SVGs from the OS-specific fonts. For older versions of Windows, we could use the Windows 10 fonts so that everything has the appropriate signature black outline/stroke.\n\nThe EmojiOne SVGs fit in nicely on macOS, so there's nothing to really change there.\n\n### Improving performance\n\nCurrently, we have to bundle a large `digests.json` file into our JavaScript bundles to get the necessary asset digest hash information to serve fallback images.\n\nFor some quick wins, we can remove those hashes to reduce the file size and serve the JSON payload async. 
There are some [more ideas in this issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/39000).",[1979],{"slug":7443,"featured":6,"template":678},"journey-in-native-unicode-emoji","content:en-us:blog:journey-in-native-unicode-emoji.yml","Journey In Native Unicode Emoji","en-us/blog/journey-in-native-unicode-emoji.yml","en-us/blog/journey-in-native-unicode-emoji",{"_path":7449,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7450,"content":7455,"config":7460,"_id":7462,"_type":16,"title":7463,"_source":17,"_file":7464,"_stem":7465,"_extension":20},"/en-us/blog/gke-webcast-recap-post",{"title":7451,"description":7452,"ogTitle":7451,"ogDescription":7452,"noIndex":6,"ogImage":6819,"ogUrl":7453,"ogSiteName":692,"ogType":693,"canonicalUrls":7453,"schema":7454},"Scalable app deployment with GitLab and Google Cloud Platform","Get the power to spin up a Kubernetes cluster managed by Google Cloud Platform in a few clicks – watch the demo of our native integration.","https://about.gitlab.com/blog/gke-webcast-recap-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Scalable app deployment with GitLab and Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-05-10\",\n      }",{"title":7451,"description":7452,"authors":7456,"heroImage":6819,"date":7457,"body":7458,"category":14,"tags":7459},[6768],"2018-05-10","\n\nThe GitLab + Google Kubernetes Engine integration's versatility speeds up software development and delivery while maintaining security and scale, allowing developers to focus on building apps instead of managing infrastructure. William Chia, Senior Product Marketing Manager at GitLab, and guest speaker William Denniss, Product Manager at Google, recently met to discuss the benefits of the integration.\n\n- [What is the GitLab GKE integration?](#what-is-the-gitlab-gke-integration)\n- [What's in the webcast?](#whats-in-the-webcast)\n- [Watch the recording](#watch-the-recording)\n- [Key takeaways](#key-takeaways)\n- [Webcast Q&A](#webcast-qa)\n\n## What is the GitLab GKE integration?\n\nWith our native Google Kubernetes Engine integration, you can automatically spin up a cluster to deploy applications, with just a few clicks. Simply connect your Google account, enter a few details, and GitLab will create the clusters for you. The clusters are fully managed by Google and run on Google Cloud Platform’s best-in-class infrastructure.\n\n## What's in the webcast\n\nWilliam Chia, Senior Product Marketing Manager at GitLab, and William Denniss, Product Manager at Google, explain how to deploy applications at scale using GKE and GitLab’s robust Auto DevOps capabilities.\n\nWe start with a crash course in Kubernetes, examining containers and deployment, before taking a closer look at the [Google Kubernetes Engine integration](/partners/technology-partners/google-cloud-platform/) and seeing it in action.\n\n## Watch the recording\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uWC2QKv15mk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n#### A seamless collaboration\n\n>Using GitLab with GKE creates an environment in which you just need to merge your code, and GitLab does all the rest. 
- William Chia, GitLab Senior Product Marketing Manager\n\n#### Kubernetes for success\n\n>If you go with Kubernetes, it gives you a good start. You can hit a button and configure GKE to do it for you and scale massively when you need to. It really sets you up for success. GitLab is a really great way to get started with Kubernetes, because it sets up everything nicely for you in an automated way. - William Denniss, Google Product Manager\n\n## Webcast Q&A\n\nDuring the webcast, live participants chatted in questions to the team. Here are some of the answers given via chat, along with answers to several questions we didn’t get a chance to address during the webcast.\n\n>Does Kubernetes have a built-in load balancer?\n\nIt does have support for load balancing across pods within a service. You may also need an external load balancer in the event that you have multiple nodes. Creating a [Kubernetes Service object](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster) and an [external load balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer) are great first steps.\n\n
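As a small illustration of that first step (a sketch with a hypothetical `web` Deployment; on GKE the external IP is provisioned for you):\n\n```shell\n# create a Service that load balances across the Deployment's pods\nkubectl expose deployment web --type=LoadBalancer --port=80\n# watch for the EXTERNAL-IP column to be populated\nkubectl get service web\n```\n\n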
>Is it possible to deploy multiple projects in the same Kubernetes cluster?\n\nIt is; you can add the cluster manually to additional projects. We are also working to make this easier in our UI, with [support for defining clusters at the group level](https://gitlab.com/gitlab-org/gitlab-ce/issues/34758).\n\n>So coming back to the setup of a cluster. If you have a separate environment for development, test, acceptance, and production, it seems we would have multiple options, like multiple clusters, or one cluster with multiple environments. Or even one cluster, one environment and point the correct environment in the `.gitlab-ci.yml` file (environment page in GitLab). What do you recommend to use to have a nice CI/CD integration and still separate environments?\n\nWe support integrating multiple clusters into a single project, and you can define which environments should be deployed to which clusters by [using the environment scope](https://docs.gitlab.com/ee/user/project/clusters/#setting-the-environment-scope).\n\n>Is it possible to add several clusters to the same project? To isolate environments based on clusters rather than namespaces.\n\nYes, this is a feature of GitLab Premium/Silver. (Note: Open source projects on GitLab.com get all of the features of our top-tier plan for free. Public projects on GitLab.com also have this capability.)\n\n>Does GitLab support on-demand cluster creation for integration testing for QA environments?\n\nWe support the integration of multiple clusters, and you can define which cluster each environment should be deployed to. For example, you can state that all review apps should be deployed into one cluster. If you would like to dynamically create a cluster during a test, you can of course do that as well by scripting it in a job.\n\n>Are these features available on GitLab CE?\n\nCluster integration and the main Auto DevOps functionality are available in Core (CE or EE without a license). Some jobs do require Premium, and they are noted in our [Auto DevOps documentation](https://docs.gitlab.com/ee/topics/autodevops/#stages-of-auto-devops).\n\n>The test stages are paid features, right?\n\nMany test jobs are open source features available in Core, but indeed some do require a paid license. The requirements for each job are noted in our [Auto DevOps documentation](https://docs.gitlab.com/ee/topics/autodevops/#stages-of-auto-devops).\n\n>What did you mean: “You can run Enterprise Edition without a license?”\n\nGitLab Enterprise Edition uses a license key to grant you access to the features of the Starter, Premium, and Ultimate plans. If you install Enterprise Edition and don’t have a license key, then you will get access to all of the Core features.\n\n[Learn more about GitLab's tiers](/blog/gitlab-tiers/).\n\n[Learn if you should use Community Edition or Enterprise Edition](/install/ce-or-ee/).\n\n>Is there a free version of GKE for testing and learning?\n\nEvery new Google Cloud Platform account receives $300 in credit upon [signup](https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral). In partnership with Google, GitLab is able to offer an additional $200 for new GCP accounts to get started with GitLab’s GKE Integration. This allows you ample usage to test and learn for free. Visit the Google partner credit page to apply for the additional $200 credit.\n\n>I see there is a $200 credit for playing around with GitLab and GKE. Can you elaborate on that? How to receive it, etc... Is it available for personal use or for professional use only? A contact form opens that wants my professional email address.\n\nThe $200 partner credit is intended for professional use. You can apply by visiting the Google Cloud Platform [partner page](https://cloud.google.com/partners/partnercredit/?PCN=a0n60000006Vpz4AAC) and filling out the form. You'll receive an email from the Google team with a key to redeem your credit.\n\n>Will Prometheus also gather the metrics without Auto DevOps, for example with our own `.gitlab-ci.yml`? Or do we need to get something from the DevOps template?\n\nWe detect common system services like the NGINX Ingress, as well as Kubernetes CPU/memory metrics. If you use the NGINX Ingress deployed from GitLab, it is automatically configured for exporting Prometheus metrics. Additional documentation is available in our [Prometheus documentation](https://docs.gitlab.com/ee/user/project/integrations/prometheus_library/nginx_ingress.html).\n\n>Will you also support AWS?\n\nOther providers are certainly items we are considering for future releases, but we started with GKE since we felt it has the best managed Kubernetes experience available today. Other clusters can always be added manually, with just a few extra steps.\n\n>What if GitLab is running on GKE itself, can you connect the app to the same Kubernetes cluster GitLab is running on? And how safe is it to run this auto-deployment on your existing Kubernetes clusters/cluster GitLab is running on? Looks as if you could easily waste your cluster with this.\n\nIf you’re running GitLab on GKE, you can definitely connect it to the same cluster GitLab is running on, both to execute your GitLab runners and as the deployment target for Auto DevOps. I’d advise using separate namespaces for your GitLab instance to avoid any interference.\n\nNamespaces are the key to achieving workload isolation in Kubernetes; they provide isolation between different deployments to avoid one accidentally influencing the other.\n\n
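To illustrate that isolation with a couple of commands (hypothetical namespace names; `deployment.yaml` is a stand-in for your app's manifest):\n\n```shell\n# one namespace for GitLab itself, separate ones for deployed environments\nkubectl create namespace gitlab\nkubectl create namespace staging\nkubectl create namespace production\n# the same manifest deploys per environment without interference\nkubectl -n staging apply -f deployment.yaml\nkubectl -n production apply -f deployment.yaml\n```\n\n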
If you like (and it’s a bit more configuration), you can even use RBAC to prevent any developer pipelines from ever touching production.\n\nIf you want total isolation, then create a separate GCP project, with a separate cluster for production :) This is definitely the best practice for larger deployments.\n\n>I have been playing around with the `dependency_scanning`/`sast`/`dast` jobs, but the images are not cached on the runner. Will they be cached in the (near) future or do we need to add any configuration?\n\nWe use Docker-in-Docker for most of these jobs, so caching is a bit tricky, and we have an [issue tracking this](https://gitlab.com/gitlab-org/gitlab-ce/issues/17861).\n\n>What does GitLab use to create the container image?\n\nAuto DevOps uses Herokuish and Heroku buildpacks to automatically detect and build the application into a Docker image. If you add a Dockerfile to your repo, GitLab will use `docker build` to create a Docker image.\n\n>Does the GKE/Kubernetes integration require the GitLab installation to be publicly accessible from the internet? Or will it work just as well if the GitLab server is private?\n\nIt does not, but if you deploy a runner to the cluster, it will need to be able to access the GitLab server to pick up jobs and do its Git clones.\n\n>How does one manage different `.env` files for different environments with GitLab CI?\n\nIf you define environment variables at the project level, you can specify which ones are available for which environments by following the [documentation on limiting environment scopes](https://docs.gitlab.com/ee/ci/variables/#limiting-environment-scopes-of-secret-variables).\n\n>What do I do when I receive this error: “We could not verify that one of your projects on GCP has billing enabled. Please try again.”\n\nPlease read the second bullet on the [GCP billing documentation page](https://docs.gitlab.com/ee/user/project/clusters/#adding-and-creating-a-new-gke-cluster-via-gitlab), which should help ensure that billing is set up for your account.\n\n>Is there a setting to control the number of review apps which are running live at any given time? Worried about cost.\n\nNote that review apps only run on open Merge Requests. If you are using the Auto DevOps template, then once the code is merged or the MR is closed, the review app shuts down. Today, there’s not a feature to limit the number of review apps, but there are a few options: review app environments can be manually stopped from both the MR and the environments page, and you can also disable review apps altogether.\n\n>What are the requirements for installing the one-click applications to the cluster?\n\nHelm Tiller, Ingress, Prometheus, and GitLab Runner don't have any special requirements when installed via one-click. The integration takes care to ensure the appropriate container images are used and everything is configured properly. The only prerequisite is to install Helm Tiller first (since it is used to install the other applications). If you install these applications manually to your cluster, you can learn about the requirements for each on their respective documentation pages.\n\n>Does this replace solutions like Rancher?\n\nIn a nutshell, yes: the GitLab GKE integration provisions and manages clusters on GKE, alleviating the need for Rancher. But this also depends on your needs. You can use GitLab with or without Rancher. For example, if you are using AKS or EKS, then Rancher will provision and manage your cluster automatically, while this requires manual configuration in GitLab.\n\n
>What is the current state of installing GitLab on Kubernetes?\n\nGitLab has two Helm charts for installing GitLab on Kubernetes: the GitLab-Omnibus chart and the cloud native GitLab chart.\n\n- **GitLab-Omnibus**: The best way to run GitLab on Kubernetes today, suited for small deployments. The chart is in beta and will eventually be deprecated by the cloud native GitLab chart.\n- **Cloud native GitLab chart**: The next-generation GitLab chart, currently in alpha. It will support large deployments with horizontal scaling of individual GitLab components. For more information, please visit [the GitLab Helm chart documentation page](https://docs.gitlab.com/charts/).\n\n>How usable is the new Helm chart for GitLab on Kubernetes?\n\nIt is in alpha, and we plan to have a beta available in May/June. We created [an issue](https://gitlab.com/groups/charts/-/epics/17) to note the items we are working to address before the beta.\n\n>How can I enable Auto DevOps if I already have a `gitlab-ci.yml` file, but only for build and test?\n\nAuto DevOps will use your custom `gitlab-ci.yml` file if it is present in your repo. If there is no file, then Auto DevOps will use the default Auto DevOps template. You can also see the [Auto DevOps template `gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab-ci-yml/blob/master/Auto-DevOps.gitlab-ci.yml) and use it as a reference to add to or update your own `gitlab-ci.yml`. For more information, please visit [the customizing `.gitlab-ci.yml` documentation page](https://docs.gitlab.com/ee/topics/autodevops/#customizing-gitlab-ci-yml).\n\nHave you tried the GitLab + GKE integration? Tweet us [@gitlab](https://twitter.com/gitlab).\n",[1204,728,1002,5240,2932],{"slug":7461,"featured":6,"template":678},"gke-webcast-recap-post","content:en-us:blog:gke-webcast-recap-post.yml","Gke Webcast Recap Post","en-us/blog/gke-webcast-recap-post.yml","en-us/blog/gke-webcast-recap-post",{"_path":7467,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7468,"content":7473,"config":7479,"_id":7481,"_type":16,"title":7482,"_source":17,"_file":7483,"_stem":7484,"_extension":20},"/en-us/blog/using-gitlab-ci-to-build-gitlab-faster",{"title":7469,"description":7470,"ogTitle":7469,"ogDescription":7470,"noIndex":6,"ogImage":5617,"ogUrl":7471,"ogSiteName":692,"ogType":693,"canonicalUrls":7471,"schema":7472},"How we used GitLab CI to build GitLab faster","Here's how we went from a daily manual merge of GitLab Core into GitLab Enterprise to automated merges every three hours.","https://about.gitlab.com/blog/using-gitlab-ci-to-build-gitlab-faster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used GitLab CI to build GitLab faster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rémy Coutable\"}],\n        \"datePublished\": \"2018-05-02\",\n      }",{"title":7469,"description":7470,"authors":7474,"heroImage":5617,"date":7476,"body":7477,"category":14,"tags":7478},[7475],"Rémy Coutable","2018-05-02","\n\nGitLab is an [open source project], but also a [commercial project]. For historic\nreasons, we have two Git repositories: [`gitlab-ce`] for GitLab Core and\n[`gitlab-ee`] for GitLab Enterprise packages (you can read [our recent blog post explaining GitLab self-managed tiers](/blog/gitlab-tiers/)).\nWhile we're working on having a [single codebase], we still need to regularly\nmerge [`gitlab-ce`] into [`gitlab-ee`], since most of the development happens on\nGitLab Core, but we also develop features on top of it for GitLab Starter, Premium, and Ultimate.\n\n
## How we used to merge GitLab CE into GitLab EE\n\nUntil December 2017, the merge of [`gitlab-ce`] into [`gitlab-ee`] was performed\nmanually every day, basically with the following commands ([see the full documentation]):\n\n```shell\n# the `origin` remote refers to https://gitlab.com/gitlab-org/gitlab-ee.git\n# the `ce` remote refers to https://gitlab.com/gitlab-org/gitlab-ce.git\ngit fetch origin master\ngit checkout -b ce-to-ee origin/master\ngit fetch ce master\ngit merge --no-ff ce/master\n```\n\nAt this point, since we'd merge a day's worth of GitLab Core's new commits,\nchances were good we'd see conflicts.\nMost of the time, the person responsible for this process would handle the\nconflict resolutions, commit them and push the `ce-to-ee` branch to GitLab.com.\n\nThere were a few problems with this approach:\n\n- GitLab's development pace is fast, which means the longer we go without a\n  merge, the more changes there are and thus more opportunities for conflicts\n- If we had many conflicts, it could take a significant amount of time for the\n  developer responsible for the merge\n- The developer performing the merge wasn't always the best person to resolve the\n  conflicts\n- Significant time was spent identifying and notifying developers to help resolve conflicts\n\n## The solution\n\nOur plan was to have a single script that would automate the merge and, in the\ncase of conflicts, identify the person best suited to resolve each of them.\nIt would then create the merge request using the [GitLab API] and a\n[GitLab API Ruby wrapper], and post a message in Slack when a new merge request\nwas created or an existing one was still pending.\n\nFinally, we'd use GitLab's [pipeline schedules] to run the script every three hours.\n\n### Step 1: Write the script\n\nWe chose to write the script in our [`release-tools`] project, since it already\nhad a strong foundation for working with the relevant Git repositories.\n\nThis script was written iteratively as a set of classes over the course of a few\nmonths:\n\n1. [Add the ability to find/create a merge request][!139]\n1. [Move remotes to the `Project` classes and get rid of the `Remotes` class][!168]\n1. [Add `head`, `status`, `log`, `fetch`, `checkout_new_branch`, `pull`, `push`, and `merge` to `RemoteRepository`][!177]\n1. [Introduce a new `CommitAuthor` class][!197]\n\nThe last piece of the puzzle was the new [`upstream_merge` Rake task][!219].\n\n### Step 2: Create a pair of SSH keys and add the public key to the `gitlab-ee` project\n\nUnder **Repository Settings > Deploy Keys** of the [`gitlab-ee`] project:\n\n![Deploy key in `gitlab-ee`](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step2.png){: .shadow.center.medium}\n\n### Step 3: Create secret variables in the `release-tools` project\n\nUnder **CI / CD Settings** of the [`release-tools`] project, create three secret\nvariables:\n\n- `AUTO_UPSTREAM_MERGE_BOT_SSH_PRIVATE_KEY`: the SSH private key\n- `GITLAB_API_PRIVATE_TOKEN`: a personal access token for our [`@gitlab-bot`]\n  user\n- `SLACK_UPSTREAM_MERGE_URL`: the Slack webhook URL we created\n  specifically for this job and used in our [`Slack::UpstreamMergeNotification` class]\n\n![Secret variable](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step3.png){: .shadow.center.medium}\n\n### Step 4: Add a new CI job that runs the `upstream_merge` Rake task for pipeline schedules only\n\n*This was heavily inspired by [GitBot – automating boring Git operations with CI].*\n\nCreate a new `upstream-merge` CI job that:\n\n- Adds the SSH private key to the `~/.ssh` folder\n- Adds `gitlab.com` to the `~/.ssh/known_hosts` file\n- Runs `bundle exec rake upstream_merge`\n\n![`upstream-merge` job](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step4.png){: .shadow.center.medium}\n\nYou can [check out the task for yourself](https://gitlab.com/gitlab-org/release-tools/blob/1cd437823113d4529919c29b177bb2037c19fc3c/.gitlab-ci.yml#L50-64).\n\n
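In spirit, the job's script boils down to a few shell commands (a sketch, not the actual `.gitlab-ci.yml`; the variable names are the ones from Step 3, and the real task is linked above):\n\n```shell\n# install the deploy key from Step 2 so the job can push to gitlab-ee\nmkdir -p ~/.ssh\necho \"$AUTO_UPSTREAM_MERGE_BOT_SSH_PRIVATE_KEY\" > ~/.ssh/id_rsa\nchmod 600 ~/.ssh/id_rsa\n# trust gitlab.com so Git operations don't prompt\nssh-keyscan gitlab.com >> ~/.ssh/known_hosts\n# create or update the CE-to-EE merge request\nbundle exec rake upstream_merge\n```\n\n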
### Step 5: Create a pipeline schedule that runs every three hours\n\nUnder **Schedules** of the [`release-tools`] project:\n\n![Pipeline schedule](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step5.png){: .shadow.center.medium}\n\n### Step 6: Let the bot work for us!\n\n**The CI job:**\n\n![CI job](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step6-1.png){: .shadow.center.medium}\n\n**The Slack messages:**\n\n![Slack messages](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step6-2.png){: .shadow.center.medium}\n\n**The merge request:**\n\n![Merge request](https://about.gitlab.com/images/blogimages/using-gitlab-ci-to-build-gitlab-faster/step6-3.png){: .shadow.center.medium}\n\n## What are the benefits?\n\nSince we started automating this process in December 2017, our dear\n[`@gitlab-bot`] created no fewer than [229 automatic merges], and we started\nnoticing the benefits immediately:\n\n- Automating the merge request creation saved developers time and removed a manual\nchore.\n- Automatically identifying the developer who introduced a conflict and assigning\nthem to resolve it spread out the workload and reduced bugs caused by improper\nconflict resolution.\n- Performing the merge automatically every three hours instead of manually once a\nday led to fewer changes at a time and a reduced number of conflicts.\n\nThe last, perhaps least visible, but most important benefit is that we reduced\ndeveloper frustration and increased happiness by removing a tedious chore.\n\n[Photo](https://unsplash.com/photos/w6OniVDCfn0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Max Ostrozhinskiy on 
[Unsplash](https://unsplash.com/search/photos/build?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n[open source project]: /community/contribute/\n[commercial project]: /pricing/\n[`gitlab-ce`]: https://gitlab.com/gitlab-org/gitlab-ce\n[`gitlab-ee`]: https://gitlab.com/gitlab-org/gitlab-ee\n[single codebase]: https://gitlab.com/gitlab-org/gitlab-ee/issues/2952\n[see the full documentation]: https://gitlab.com/gitlab-org/release/docs/blob/master/general/merge-ce-into-ee.md\n[pipeline schedules]: https://docs.gitlab.com/ee/ci/pipelines/schedules.html\n[GitLab API]: https://docs.gitlab.com/ee/api/merge_requests.html\n[GitLab API Ruby wrapper]: https://rubygems.org/gems/gitlab\n[`release-tools`]: https://gitlab.com/gitlab-org/release-tools/\n[!139]: https://gitlab.com/gitlab-org/release-tools/merge_requests/139\n[!168]: https://gitlab.com/gitlab-org/release-tools/merge_requests/168\n[!177]: https://gitlab.com/gitlab-org/release-tools/merge_requests/177\n[!197]: https://gitlab.com/gitlab-org/release-tools/merge_requests/197\n[!219]: https://gitlab.com/gitlab-org/release-tools/merge_requests/219\n[`Slack::UpstreamMergeNotification` class]: https://gitlab.com/gitlab-org/release-tools/blob/1cd437823113d4529919c29b177bb2037c19fc3c/lib/slack/upstream_merge_notification.rb#L7\n[GitBot – automating boring Git operations with CI]: /2017/11/02/automating-boring-git-operations-gitlab-ci/\n[229 automatic merges]: https://gitlab.com/gitlab-org/gitlab-ee/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&label_name[]=CE%20upstream&author_username=gitlab-bot\n[`@gitlab-bot`]: https://gitlab.com/gitlab-bot\n",[915,832],{"slug":7480,"featured":6,"template":678},"using-gitlab-ci-to-build-gitlab-faster","content:en-us:blog:using-gitlab-ci-to-build-gitlab-faster.yml","Using Gitlab Ci To Build Gitlab Faster","en-us/blog/using-gitlab-ci-to-build-gitlab-faster.yml","en-us/blog/using-gitlab-ci-to-build-gitlab-faster",{"_path":7486,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7487,"content":7492,"config":7497,"_id":7499,"_type":16,"title":7500,"_source":17,"_file":7501,"_stem":7502,"_extension":20},"/en-us/blog/getting-started-gitlab-ci-gcp",{"title":7488,"description":7489,"ogTitle":7488,"ogDescription":7489,"noIndex":6,"ogImage":6819,"ogUrl":7490,"ogSiteName":692,"ogType":693,"canonicalUrls":7490,"schema":7491},"Getting started with GitLab CI/CD and Google Cloud Platform","Discover how easy it is to set up CI/CD and Kubernetes deployment with our integration with Google Kubernetes Engine.","https://about.gitlab.com/blog/getting-started-gitlab-ci-gcp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab CI/CD and Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-04-24\",\n      }",{"title":7488,"description":7489,"authors":7493,"heroImage":6819,"date":7494,"body":7495,"category":14,"tags":7496},[890],"2018-04-24","\n\nEarlier this month [we announced our new native integration with Google Kubernetes Engine (GKE)](/blog/gke-gitlab-integration/),\nallowing you to [set up CI/CD](/topics/ci-cd/) and Kubernetes deployment in just a few clicks. If you're new to\nGitLab CI on Google Cloud Platform (GCP), we've put together a quick [demo](#demo) and [instructions](#instructions) you can view below. 
For a more detailed walkthrough and the chance to ask questions, join us on April 26 for a [live demo](#join-google-and-gitlab-for-a-live-demo).\n\n## Demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/u3jFf3tTtMk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Instructions\n\n### Add a Kubernetes Engine cluster\n\nHead on over to the CI/CD -> Kubernetes menu option in the GitLab UI. Here you can add your existing cluster to your project or create a brand new one.\n\n![Add your Kubernetes cluster](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/step1.png){: .shadow.center.medium}\n\nOnce connected, you can install applications like [Helm Tiller](https://helm.sh/), [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), [Prometheus](https://docs.gitlab.com/ee/administration/monitoring/prometheus/), and [GitLab Runner](https://docs.gitlab.com/ee/ci/runners/) on your cluster with just one click.\n\n![Install applications](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/install-applications.png){: .shadow.center.medium}\n\n### Enable Auto DevOps\n\nWe've also worked with Google to integrate [GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) with GKE. Using them together, you'll have a continuous deployment pipeline that automatically creates a [review app](https://docs.gitlab.com/ee/ci/review_apps/) for each merge request and, once you merge, deploys the application into production on production-ready GKE.\n\nTo get started, go to CI/CD -> General pipeline settings, and select “Enable Auto DevOps.” For more information, read the [Auto DevOps docs](https://docs.gitlab.com/ee/topics/autodevops/).\n\n![Enable Auto DevOps](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/step2.png){: .shadow.center.medium}\n\nAuto DevOps takes the manual work out of CI/CD by automatically detecting what languages you’re using, and configuring a continuous integration and continuous deployment pipeline that results in your app running live on the Kubernetes Engine cluster.\n\n![Review pipeline](https://about.gitlab.com/images/blogimages/gitlab-ci-gcp/step3.png){: .shadow.center.medium}\n\nNow, whenever you create a merge request, we'll run a review pipeline to deploy a review app to your cluster where you can preview your changes. When you merge the code, GitLab will run a production pipeline to deploy your app to production, running on Kubernetes Engine!\n\n## Get $500 credit for your project\n\nEvery new Google Cloud Platform account receives $300 in credit [upon signup](https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral). In partnership with Google, we're offering an additional $200 for both new and existing GCP accounts to get started with the GKE integration. Here's a link to [apply for your $200 credit](https://goo.gl/AaJzRW).\n\n## Join Google and GitLab for a live demo\n\nJoin Google’s [William Denniss](https://www.linkedin.com/in/williamdenniss/) and GitLab’s [William Chia](https://www.linkedin.com/in/williamchia/) for a walkthrough of the integration on April 26. 
You’ll learn how easy it is to set up a Kubernetes cluster, how to deploy your app using GitLab CI/CD, and how GKE enables you to deploy, update, and manage containerized applications at scale.\n\n[Register today](/webcast/scalable-app-deploy/)!\n",[728,1204,1002,2932],{"slug":7498,"featured":6,"template":678},"getting-started-gitlab-ci-gcp","content:en-us:blog:getting-started-gitlab-ci-gcp.yml","Getting Started Gitlab Ci Gcp","en-us/blog/getting-started-gitlab-ci-gcp.yml","en-us/blog/getting-started-gitlab-ci-gcp",{"_path":7504,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7505,"content":7511,"config":7517,"_id":7519,"_type":16,"title":7520,"_source":17,"_file":7521,"_stem":7522,"_extension":20},"/en-us/blog/five-things-i-wish-i-knew-about-kubernetes",{"title":7506,"description":7507,"ogTitle":7506,"ogDescription":7507,"noIndex":6,"ogImage":7508,"ogUrl":7509,"ogSiteName":692,"ogType":693,"canonicalUrls":7509,"schema":7510},"5 things I wish I'd known about Kubernetes before I started","Looking to dive into Kubernetes? Here’s some advice on how to get started from a GitLab engineer.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670146/Blog/Hero%20Images/containers-for-five-things-kubernetes-blog-post.jpg","https://about.gitlab.com/blog/five-things-i-wish-i-knew-about-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 things I wish I'd known about Kubernetes before I started\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Plum\"}],\n        \"datePublished\": \"2018-04-16\",\n      }",{"title":7506,"description":7507,"authors":7512,"heroImage":7508,"date":7514,"body":7515,"category":14,"tags":7516},[7513],"Jason Plum","2018-04-16","\n\nI first encountered Kubernetes in January 2017 when our CEO [Sid Sijbrandij](/company/team/#sytses) challenged me and five other team members to get a live install functional on Kubernetes for an Idea to Production demo during the company summit in Cancún.\n\nPrior to the challenge I had never touched Kubernetes. Nonetheless, my team members and I conquered the challenge, completing the task a day before deadline to boot. You can [watch the demo here](#kubernetes-summit-challenge-demo).\n\nNow, a little more than a year later, I've taken a deeper dive into the container orchestration platform, leading my team in building and releasing the alpha version of the [cloud native GitLab helm chart](https://gitlab.com/charts/gitlab/blob/master/README.md), which allows for the deployment of GitLab on Kubernetes. With that experience fresh in mind, I've got a bit of advice for those looking to move into the world of Kubernetes:\n\n## The internet is your friend. Check out the documentation, online courses and walkthroughs.\n\nFirst things first, there are a couple of really good sets of documentation out there, and even a solid [course on edX](https://www.edx.org/course/introduction-to-kubernetes). These are all good choices. You don’t have to go through all of the courses to really get a running start with what’s going on. But if you want to get into the nitty-gritty, I would strongly suggest taking some of the courses. 
If all you want to do is see it work, be able to play with it and kind of get an idea of what it is, then you can get a [free trial](https://cloud.google.com/partners/partnercredit/?PCN=a0n60000006Vpz4AAC) with [GKE (Google Kubernetes Engine)](/blog/gke-gitlab-integration/), set up a little cluster and do a deployment that way. And if all you want to do is deploy a couple of your applications into the same cluster, we (GitLab) already have [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) that can hook everything together for you, and then you can use your entire workflow, do your deployments, and pop right in there. We’ll even help you spin up a GKE cluster with all the requirements [right from the UI](https://docs.gitlab.com/ee/user/project/clusters/#adding-and-creating-a-new-gke-cluster-via-gitlab).\n\nBut if you want to do it by hand the first time, you should start with the tutorial walkthroughs. Install the tools. They are all straightforward to get your hands on. Pull down one of the charts, try it, change some configuration options and retry it. Just play with it.\n\n## Be clear on how you will use Kubernetes.\n\nThe challenges you encounter in Kubernetes really depend on what you’re trying to do with it. Are you using it as a testing ground, are you using it as a staging environment, or are you going all the way in and going for production? Just using it for a development environment is not really complicated. You need to understand some basic concepts, like namespaces. You need to know what a secret is, what a configuration is, and what a deployment is. These core concepts will get you a very long way.\n\nBeyond that, you start getting into the involved steps. That’s where you need to understand what didn’t exist before, like role-based access control, or RBAC, which is now part of both Kubernetes and Helm. Those features did not exist a year ago, and now they do. They are becoming ever-present and even more involved. This is good for people doing production: engineers, SREs (site reliability engineers), deployments, customers, etc., because now you’re making sure that things aren’t touching other things they shouldn’t. It’s not an open, flat network plane.\n\nNow you have fine-grained controls via RBAC. Multiple namespaces, with per-namespace controls on access to, and creation of, secrets and configuration. This allows you to have production-grade multi-tenant clusters where you are not concerned about neighbors stepping on each other or poking their nose where they don't belong. This is a big step compared to the state of Kubernetes as a whole in early 2017.\n\n> The thing I wish I knew was how fast it was going to develop. I walked into Kubernetes in January and then I walked away from it in February. When I came back to it in September, I was surprised by how much had changed. And then the same thing keeps happening every single release.\n\n## Don’t expect the same version on every service provider.\n\nI think the biggest thing that people should understand is that not all cloud providers provide the exact same version of Kubernetes. They’re all very close, they’re all almost identical, but the way in which certain features are implemented is slightly different. So, the way you get it on Azure’s container services and the way you get it on Amazon’s container services or GKE won't be exactly the same. Everybody’s implementation is slightly different. 
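\n\nOne quick way to see that drift for yourself is to ask each cluster what it is actually running. Assuming `kubectl` is pointed at the cluster in question, compare the output of these two commands across providers:\n\n```shell\n# The server version often lags or leads your client version,\n# and the set of exposed API groups differs per provider.\nkubectl version --short\nkubectl api-versions\n```\n\n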
Perhaps the available version of the base functionality is going to be a little different, but the real difference will be between each of these providers' own product integrations.\n\nThen there’s the whole ‘roll your own’ approach, at which point you get to use really nifty plugins and other components that you can’t use out of the box with a cloud provider today. Play with it, but it still comes down to this: there are differences between the providers. Target mainline or vanilla, and it will work everywhere. Target a provider, and you’re now a part of that provider.\n\n## Be nimble. Change is constant, but don’t follow along blindly in an attempt to keep up.\n\nWow, there is just so much development. In the year from when I first touched Kubernetes to where I’m at now, the feature set has expanded quite a bit. And the controls that are required for large enterprises are now in place. These can bite you if you’re not paying attention, but they’re not horribly hard to understand if you’re willing to just take a moment and read. Also, everybody and their brother is now doing this and playing with this. Just because you see somebody else do it doesn’t mean it’s an industry best practice.\n\n## Last bit of sage advice: Seriously. DO NOT sleep on the releases.\n\nThe thing I wish I knew was how fast it was going to develop. I walked into Kubernetes in January and then I walked away from it in February. When I came back to it in September, I was surprised by how much had changed. And then the same thing keeps happening every single release.\n\nIt is a production-ready system. However, new feature sets and capabilities are evolving at such a pace that it can be hard to keep up. You’re not breaking anything, but now there are all these new, nifty features. All the shinies keep coming.\n\nThis is not software on a six-month release cycle. I’m not going to install Kubernetes, walk away for a year and come back thinking I’ll simply be able to go to the next LTS (long-term support). You have to be present. You have to be paying attention. 
It doesn’t matter if you only check in once a month, you’ve got to check in once a month.\n\n",[1002],{"slug":7518,"featured":6,"template":678},"five-things-i-wish-i-knew-about-kubernetes","content:en-us:blog:five-things-i-wish-i-knew-about-kubernetes.yml","Five Things I Wish I Knew About Kubernetes","en-us/blog/five-things-i-wish-i-knew-about-kubernetes.yml","en-us/blog/five-things-i-wish-i-knew-about-kubernetes",{"_path":7524,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7525,"content":7531,"config":7538,"_id":7540,"_type":16,"title":7541,"_source":17,"_file":7542,"_stem":7543,"_extension":20},"/en-us/blog/monitoring-your-gitlab-environment-with-the-elk-stack",{"title":7526,"description":7527,"ogTitle":7526,"ogDescription":7527,"noIndex":6,"ogImage":7528,"ogUrl":7529,"ogSiteName":692,"ogType":693,"canonicalUrls":7529,"schema":7530},"GitLab monitoring: Setting up Logz.io and ELK stack","ELK, together with GitLab’s logging framework, gives organizations a comprehensive view for monitoring, troubleshooting, and analyzing team activity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680160/Blog/Hero%20Images/gitlab-logz-io-cover.png","https://about.gitlab.com/blog/monitoring-your-gitlab-environment-with-the-elk-stack","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to set up advanced monitoring for your GitLab environment with Logz.io and the ELK stack\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Berman\"}],\n        \"datePublished\": \"2018-04-13\",\n      }",{"title":7532,"description":7527,"authors":7533,"heroImage":7528,"date":7535,"body":7536,"category":14,"tags":7537},"How to set up advanced monitoring for your GitLab environment with Logz.io and the ELK stack",[7534],"Daniel Berman","2018-04-13","\n\nGitLab comes with some built-in monitoring and visualization capabilities, such as [Cycle Analytics](/solutions/value-stream-management/) and the [per-project contributors](https://docs.gitlab.com/ee/user/group/contribution_analytics/) and [repository](https://docs.gitlab.com/ee/user/project/repository/#repository-graph) graphs, as well as [integration with Prometheus](https://docs.gitlab.com/ee/administration/monitoring/prometheus/) to monitor your GitLab instance at the server level. Cycle Analytics is especially useful as it enables teams to analyze their efficiency. However, if you want to analyze the data by searching and querying, or if you want to visualize the data yourself, you might find it helpful to adopt a more centralized methodology by integrating with the [ELK Stack](https://logz.io/learn/complete-guide-elk-stack/) (Elasticsearch, Logstash and Kibana).\n\n\u003C!-- more -->\n\nELK provides powerful log aggregation, analysis and visualization capabilities that, used in tandem with GitLab’s extensive logging framework, will give organizations an accurate and comprehensive bird's eye view of the system for monitoring, troubleshooting, and analyzing team activity. 
Using GitLab’s log data, for example, rich dashboards can be created to monitor not only the system’s general health but also specific team metrics, such as the number of commits, issues opened and closed, and so forth.\n\n[Logz.io](https://logz.io/) users can benefit from a built-in integration with GitLab and the additional analysis tools provided by the service, but if you’re using your own ELK deployment, you’ll be able to set up the described integration as well.\n\n## How to integrate GitLab and Logz.io\n\nThe steps outlined below presume the following:\n\n* You have an [Omnibus GitLab](https://docs.gitlab.com/omnibus/) installation up and running. If you haven't installed GitLab already, visit the [installation page](/installation/).\n* You have an ELK Stack up and running (either your own ELK deployment or a Logz.io account). We will be using Filebeat to ship the logs into Elasticsearch, so Logstash is only required if you want to apply advanced parsing to the data.\n\n### GitLab logs\nAs mentioned above, GitLab has an [advanced logging framework](https://docs.gitlab.com/ee/administration/logs.html) that ships a variety of different system logs.\n\nOf course, what log data you want to ship is entirely up to you. You can ship all the log data, or you can be a bit more selective. These logs can be pretty verbose, so depending on storage and retention considerations, it’s good practice to first understand which logs you need to monitor.\n\nThe Filebeat configurations provided below are designed for shipping the following logs.\n\n### production_json.log\nThis JSON-formatted log records requests sent by GitLab to the Ruby controllers. Here is a sample log:\n\n```json\n{\"method\":\"GET\",\"path\":\"/-/metrics\",\"format\":\"html\",\"controller\":\n\"MetricsController\",\"action\":\"index\",\"status\":200,\"duration\":1.69,\n\"view\":0.23,\"db\":0.0,\"time\":\"2017-12-26T14:47:49.505Z\",\"params\":{},\n\"remote_ip\":null,\"user_id\":null,\"username\":null}\n```\n\nAs you can see, the information in the log includes the request method, the controller, the action performed, the request status, duration, remote IP, and more.\n\nThe location of the file will vary according to your installation type. In the case of the Omnibus GitLab packages (recommended installation), the file will reside at:\n\n```\n/var/log/gitlab/gitlab-rails/production_json.log\n```\n\n### production.log\nThis is a plain text log file that contains information about all performed requests. It includes the request URL, type, and origin IP, as well as the parts of code that serviced it. The log also provides details on all SQL requests and how long they took. Here is a sample log:\n\n```\nCompleted 200 OK in 1ms (Views: 0.2ms | ActiveRecord: 3.2ms |\nElasticsearch: 1.5ms)\n```\n\nAgain, the location of the file varies. 
In the case of the GitLab Omnibus packages, the file resides at:\n\n```\n/var/log/gitlab/gitlab-rails/production.log\n```\n\n### api_json.log\nA specific JSON-formatted file for logging API requests only. Here is a sample log:\n\n```json\n{\"time\":\"2017-12-10T18:30:11.219Z\",\"severity\":\"INFO\",\"duration\":5.22,\n\"db\":0.82,\"view\":10.11,\"status\":200,\"method\":\"POST\",\"path\":\"/api/v4/\ninternal/allowed\",\"params\":{\"action\":\"git-upload-pack\",\"changes\":\"_any\",\n\"project\":\"hello-world\",\"protocol\":\"ssh\",\"env\":\"{}\",\"key_id\":\"[FILTERED]\",\n\"secret_token\":\"[FILTERED]\"},\"host\":\"127.0.0.1\",\"ip\":\"127.0.0.1\",\"ua\":\"Ruby\"}\n```\n\nLocation:\n\n```\n/var/log/gitlab/gitlab-rails/api_json.log\n```\n\n### application.log\nThis plain text log file tracks GitLab actions such as adding a new user, creating a new project or group, and so forth. It can act as an audit trail for monitoring user activity.\n\nExample:\n\n```\nDecember 24, 2017 15:10: User Created: username=dbirtin email=xxx@gmail.com\nip=xx.xx.xxx.xx confirmed:true\n```\n\nLocation:\n```\n/var/log/gitlab/gitlab-rails/application.log\n```\n\nIn any case, I recommend reading GitLab’s [excellent documentation](https://docs.gitlab.com/ee/administration/logs.html) on these log files and the information included in them before commencing.\n\n### Configuring Filebeat\n\nFilebeat is a log shipper belonging to the Beats family of shippers. Written in Go and extremely lightweight, Filebeat is the easiest and most cost-efficient way of shipping log files into the ELK Stack.\n\nIf you haven’t already installed Filebeat, here are some instructions (for Debian):\n\n```\ncurl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.1.1-amd64.deb\nsudo dpkg -i filebeat-6.1.1-amd64.deb\n```\n\nOpen up the Filebeat configuration file at `/etc/filebeat/filebeat.yml`:\n\n```\nsudo vim /etc/filebeat/filebeat.yml\n```\n\nThe following configuration defines the different GitLab files to track and ship into ELK. I’ve defined a prospector for each log type so I can add custom fields to each. 
Alternatively, I could have defined one prospector for all of the files.\n\n```\nfilebeat.prospectors:\n- type: log\n  enabled: true\n  paths:\n    - /var/log/gitlab/gitlab-rails/production_json.log\n  fields:\n    log: production_json\n  json.keys_under_root: true\n- type: log\n  enabled: true\n  paths:\n    - /var/log/gitlab/gitlab-rails/production.log\n  fields:\n    log: production\n- type: log\n  enabled: true\n  paths:\n    - /var/log/gitlab/gitlab-rails/api_json.log\n  fields:\n    log: api_json\n  json.keys_under_root: true\n- type: log\n  enabled: true\n  paths:\n    - /var/log/gitlab/gitlab-rails/application.log\n  fields:\n    log: application\noutput.elasticsearch:\n  # Array of hosts to connect to.\n  hosts: [\"localhost:9200\"]\n```\n\nStart Filebeat with:\n\n```\nsudo service filebeat start\n```\n\nAfter a while, a new index will be created and you can define a new index pattern (filebeat-*) in Kibana to begin analyzing the data.\n\n### Shipping to Logz.io\nIf you are using Logz.io, a few small modifications need to be applied to establish the logging pipeline.\n\nFirst, you will need to download an SSL certificate to use encryption:\n\n```\nwget https://raw.githubusercontent.com/logzio/public-certificates/master/COMODORSADomainValidationSecureServerCA.crt\n\nsudo mkdir -p /etc/pki/tls/certs\n\nsudo cp COMODORSADomainValidationSecureServerCA.crt /etc/pki/tls/certs/\n```\n\nYou can now edit the Filebeat configuration file. If you like, you can make use of the Logz.io Filebeat wizard to generate the Filebeat YAML file automatically (available in the Filebeat section, under Log Shipping in the UI).\n\nEither way, the configurations should look something like this:\n\n```\nfilebeat:\n  prospectors:\n    -\n      paths:\n        - /var/log/gitlab/gitlab-rails/production_json.log\n      fields:\n        logzio_codec: json\n        token: \u003CyourToken>\n        type: gitlab-production-json\n      fields_under_root: true\n      encoding: utf-8\n      ignore_older: 3h\n    -\n      paths:\n        - /var/log/gitlab/gitlab-rails/production.log\n      fields:\n        logzio_codec: plain\n        token: \u003CyourToken>\n        type: gitlab-production\n      fields_under_root: true\n      encoding: utf-8\n      ignore_older: 3h\n    -\n      paths:\n        - /var/log/gitlab/gitlab-rails/api_json.log\n      fields:\n        logzio_codec: json\n        token: \u003CyourToken>\n        type: gitlab-api-json\n      fields_under_root: true\n      encoding: utf-8\n      ignore_older: 3h\n    -\n      paths:\n        - /var/log/gitlab/gitlab-rails/application.log\n      fields:\n        logzio_codec: plain\n        token: \u003CyourToken>\n        type: gitlab-application\n      fields_under_root: true\n      encoding: utf-8\n      ignore_older: 3h\n  registry_file: /var/lib/filebeat/registry\noutput:\n  logstash:\n    hosts: [\"listener.logz.io:5015\"]\n    ssl:\n      certificate_authorities: ['/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt']\n```\n\nThe main differences are:\n\n* Logz.io-specific fields added to each prospector. 
Replace \u003CyourToken> with your Logz.io account token (it can be found in the Logz.io UI, under Settings).\n* The output section defines the Logz.io listener and the SSL certificate to use.\n\nOnce you start (or restart) Filebeat, the GitLab logs will begin to show up in Logz.io.\n\n### Analyzing the GitLab logs\nNow that your logging pipeline is up and running, it’s time to look into the data with some simple analysis operations in Kibana.\n\nSome of the fields can be used to get more visibility into the logs. Adding, for example, the ‘type’ field (the ‘log’ field if you are using your own ELK) helps give the logs some context.\n\nWe can use Kibana queries to search for specific log data. Say, for example, we want to take a look at failed logins into the system. To do this, we would use a combination of a field-level and free-text search:\n\n```\ntype:gitlab-application AND \"failed\"\n```\n\n![Analyzing logs](https://about.gitlab.com/images/blogimages/monitoring-your-gitlab-environment-with-the-elk-stack/analyzing-logs.png){: .shadow.center}\n\nAnother example could be querying Elasticsearch for error responses to GitLab requests:\n\n```\ntype:gitlab-production-json AND status:[400 TO *]\n```\n\n![GitLab requests](https://about.gitlab.com/images/blogimages/monitoring-your-gitlab-environment-with-the-elk-stack/gitlab-requests.png){: .shadow.center}\n\nUsing Kibana’s visualization capabilities, you can create a series of simple charts and metric visualizations that give you a nice overview of your GitLab environment. Here are a few examples.\n\n### Visualizing commits\nWhat organization does not want to monitor its team’s productivity? A simple metric visualization will give you a counter of how many commits were performed by your team:\n\n![Fourteen commits](https://about.gitlab.com/images/blogimages/monitoring-your-gitlab-environment-with-the-elk-stack/14.png){: .shadow.center}\n\nLikewise, we can create a line chart visualization that gives us an overview of commits over time, per user:\n\n![Commits over time](https://about.gitlab.com/images/blogimages/monitoring-your-gitlab-environment-with-the-elk-stack/line-chart.png){: .shadow.center}\n\n### Visualizing issues\nIn a similar fashion, you can use Kibana to keep track of opened and closed issues. A simple data table visualization gives us a breakdown of the issues opened:\n\n![Visualize issues](https://about.gitlab.com/images/blogimages/monitoring-your-gitlab-environment-with-the-elk-stack/visualize-issues-1.png){: .shadow.center}\n\nA line chart can show how many issues were opened over time:\n\n![Line chart](https://about.gitlab.com/images/blogimages/monitoring-your-gitlab-environment-with-the-elk-stack/line-1.png){: .shadow.center}\n\nThe list goes on. You can monitor projects created, merges, user activity, CI/CD processes, and more. The logs generated by GitLab include a wealth of information that can be tapped into for monitoring, and adding these visualizations into one Kibana dashboard gives you a nice overview of your environment.\n\n![End dashboard](https://about.gitlab.com/images/blogimages/monitoring-your-gitlab-environment-with-the-elk-stack/end-dashboard.png){: .shadow.center}\n\n### End notes\nThe ELK Stack offers built-in storage, search and visualization features that complement GitLab’s rich logging capabilities. Using Filebeat, building a logging pipeline for shipping data into ELK is simple. 
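\n\nOn a self-hosted stack, a quick way to confirm the pipeline is actually flowing (a sketch assuming Elasticsearch is listening on its default localhost:9200) is to query the Filebeat indices directly:\n\n```shell\n# Did a filebeat-* index appear, and is its document count growing?\ncurl -s 'localhost:9200/_cat/indices/filebeat-*?v'\ncurl -s 'localhost:9200/filebeat-*/_count?pretty'\n```\n\n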
If you want to further process the logs, you might want to consider adding Logstash into your pipeline setup.\n\nLogz.io provides some tools to help you hit the ground running – easy integration steps, as well as the monitoring dashboard above. To install the dashboard, simply search for ‘GitLab’ in ELK Apps and hit the install button.\n\nEnjoy!\n\n## About the guest author\n\nDaniel Berman is Product Evangelist at Logz.io. He is passionate about log analytics, big data, cloud, and family and loves running, Liverpool FC, and writing about disruptive tech stuff. Follow him [@proudboffin](https://twitter.com/proudboffin).\n",[232],{"slug":7539,"featured":6,"template":678},"monitoring-your-gitlab-environment-with-the-elk-stack","content:en-us:blog:monitoring-your-gitlab-environment-with-the-elk-stack.yml","Monitoring Your Gitlab Environment With The Elk Stack","en-us/blog/monitoring-your-gitlab-environment-with-the-elk-stack.yml","en-us/blog/monitoring-your-gitlab-environment-with-the-elk-stack",{"_path":7545,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7546,"content":7551,"config":7556,"_id":7558,"_type":16,"title":7559,"_source":17,"_file":7560,"_stem":7561,"_extension":20},"/en-us/blog/polishing-gitlabs-ui-a-new-color-system",{"title":7547,"description":7548,"ogTitle":7547,"ogDescription":7548,"noIndex":6,"ogImage":2284,"ogUrl":7549,"ogSiteName":692,"ogType":693,"canonicalUrls":7549,"schema":7550},"Polishing GitLab’s UI: A new color system","Senior UX Designer Pedro Moreira da Silva takes us on a deep dive into how the UX team improved the GitLab UI’s color palette.","https://about.gitlab.com/blog/polishing-gitlabs-ui-a-new-color-system","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Polishing GitLab’s UI: A new color system\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pedro Moreira da Silva\"}],\n        \"datePublished\": \"2018-03-29\",\n      }",{"title":7547,"description":7548,"authors":7552,"heroImage":2284,"date":7553,"body":7554,"category":14,"tags":7555},[5678],"2018-03-29","\nWe receive a lot of feedback from our users and the broader community. After\nhearing that there is a perceived lack of consistency and quality in GitLab’s\nUI, we decided to take a look at our _color palette_.\n\n\u003C!-- more -->\n\nAesthetic aspects like this are a fundamental part of the UI. If we don’t get\nthese right, everything else in the UI won’t feel, look, or behave correctly.\nLike a house, these aesthetics are the foundation upon which everything else is\nbuilt.\n\nOur color palette had various issues, so we started by:\n\n- [building a better palette][ce#28614] that aligned with our goals,\n- and [defining a color priority system][ce#31094] that helped us move forward.\n\n## Why start with colors?\n\nThere are many aesthetic aspects to a UI. So why tackle colors first? 
Well…\n\n- **Colors are easy to change**: it’s just a matter of changing simple values in\n  our [`variables.scss`](https://gitlab.com/gitlab-org/gitlab-ce/blob/1553a34dbff167978f5dc81cc3a21e0b3b2b2bfa/app/assets/stylesheets/framework/variables.scss#L14)\n  file.\n- **Color changes don’t affect layout**: we weren’t reinventing the wheel, so\n  these changes wouldn’t influence the layout and spacing between elements like\n  typography can.\n\nAnd, more subjectively, colors have a huge impact on the perception of a UI.\nIt’s said that 90 percent of information entering the brain is visual, and color\nis an attention-grabbing device.\n\n## Issues with the previous color palette\n\n![Previous color palette](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/prev-palette.png)\n\n### It didn’t extend the brand colors\n\nThe palette’s colors weren’t in line with our [brand colors](https://gitlab.com/gitlab-com/gitlab-artwork/blob/9b07772f44a9fa51f395a95928a6e41c61a5b1cb/colors),\nwith the most obvious example being the pinkish-red normally associated with\nnegative aspects like errors or irreversible actions. We already have a red from\nour brand, so why use a different one?\n\n### There were too many similar colors\n\nWith so many colors, it wasn’t easy to tell them apart. They were so similar\nthat they no longer brought value to the table, just more guesswork and\nmaintenance.\n\n### There wasn’t enough contrast\n\nMany of our color combinations did not meet the contrast ratios defined in the\n[Web Content Accessibility Guidelines (WCAG)][wcag-contrast].\n\nNote that some of these issues were also applicable to grayscale colors (also\ncalled “achromatic”).\n\n## Building a better palette\n\nAt GitLab, we’ve done a lot of things while standing on the shoulders of giants,\naligning with our company value of [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions).\nAs such, one of our initial thoughts was to use an existing color palette,\nsomething that could save us time and maybe serve as the basis for our work.\n\nWe soon found [Open color](https://yeun.github.io/open-color/), an open source\ncolor scheme optimized for UI. It has 13 hues, each with 10 levels of\nbrightness, totaling 130 different colors. All of the values are there, and it\nwould be easy for our Frontend team to get started by importing it as a\ndependency. This was starting to look very promising and we were getting\nexcited about this quick start.\n\nHowever, the more we thought about our current needs and goals, the more we\nrealized that this approach wasn’t going to work for us. Existing color palettes\nusually had too many colors for our needs, and the ones we did need would have\nto be tweaked to align with our brand colors. All of the upsides of using an\nexisting color palette were now irrelevant.\n\nWe went back to the drawing board, starting with defining the goals we wanted\nour new color palette to achieve:\n\n- Align with and extend our brand colors\n- Have only the hues that we need, the colors that have meaning in the UI\n- Be accessible by passing the WCAG\n\n### 1. 
Extending the brand\n\nThe first step in creating our new color palette was inspired by “[Add Colors To Your Palette With Color Mixing][viget-article],”\nwhere we used [ColorSchemer Studio](http://www.colorschemer.com/osx_info.php)\nto generate this color wheel from the [three brand colors](https://gitlab.com/gitlab-com/gitlab-artwork/blob/9b07772f44a9fa51f395a95928a6e41c61a5b1cb/colors)\nand the [primary purple used on this site](https://gitlab.com/gitlab-com/www-gitlab-com/blob/9c4a9b653f013483d5053c1da30cba6d4bb96bd5/source/stylesheets/_variables.scss#L16):\n\n{: .text-center}\n![Color wheel generated from the brand colors](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/color-wheel.png){:style=\"width:350px\"}\n\nInitial colors were separated by even intervals of hue and manually tweaked. In\nthe image above, the matching brand colors are next to the wheel for reference.\n\n### 2. Cutting the rainbow\n\nThen, we generated tints and shades for some of the hues in that color wheel:\ngreen, blue, purple, red and orange.\n\n{: .text-center}\n![Tints and shades](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/tints-shades.png){:style=\"width:451px\"}\n\nThese were first obtained from the [Material Design Palette Generator](http://mcg.mbitson.com/)\nand then tweaked manually using [Colorizer](http://colorizer.org/) and Eric\nMeyer’s [Color Blender](https://meyerweb.com/eric/tools/color-blend). The dark\norange colors are a good example of manual tweaking as they initially looked\nvery “muddy.”\n\nIt’s important to consider the number of tints and shades that you need, as that\naffects the flexibility when applying those colors. Our guiding principle here\nwas to provide clear and visible contrast between each step of the scale. If we\nhad steps that were too similar, the difference wouldn’t be noticeable, which\nmeant that there was no value in having those colors.\n\nWe didn’t want all of the colors of the rainbow, just the ones that _carry\nmeaning effectively_. We want to be able to communicate states and actions by\napplying colors to elements in the UI (e.g. informational elements are\nassociated with blue). If you have too many similar colors in a UI, like green\nand lime, you’re expecting too much not only of your users but also of your\nteam. On the one hand, most of your users won’t notice the difference between\ncolors when placed in a complex UI, so they also won’t pick up the different\nmeanings. On the other hand, your team will have more work learning, working\nwith, and maintaining unnecessary colors.\n\nAdditionally, we shouldn’t rely on color alone to communicate something, so\nthat’s also another point for not having too many similar colors. This is\nactually one of the success criteria of the WCAG about the [use of color](https://www.w3.org/TR/UNDERSTANDING-WCAG20/visual-audio-contrast-without-color.html):\n\n> Color is not used as the only visual means of conveying information,\n> indicating an action, prompting a response, or distinguishing a visual\n> element.\n\n### 3. Colors for everyone\n\nUsing a small set of colors which allows for better memorization and recognition\nis already a good step towards a more usable product, but it’s not enough.\n\n[Evaluating, testing, and prioritizing accessibility problems](https://gitlab.com/groups/gitlab-org/-/epics/31)\nis one of our main initiatives here at GitLab. 
Establishing contrast between\ntext and background is one of the key aspects of accessibility and, as we saw\nbefore, our previous color palette didn’t meet the [WCAG contrast\nratios][wcag-contrast]. So, as we were defining our new color palette, we\ncontinually tested the colors using the [WebAIM Color Contrast Checker](https://webaim.org/resources/contrastchecker/).\n\nAlong the way, we hit a problem: combinations of _white_ text over _green_ or\n_orange_ backgrounds did not pass **WCAG level AA for small text**. This was an\nissue because we wanted to keep a uniform “vibrancy” and “pop” throughout all\ncolors. While the colors looked uniform to our human eye, the WCAG test didn’t\n“see” them as we did. Would we be forced to “break” this visual consistency and\nuse darker shades for those colors? Not only that, but this would render them too\ndark to _carry meaning effectively_. In the following example, the “success”\nmeaning of green or the “warning” meaning of orange become less immediate as\ntheir contrast increases.\n\n![Warning and success elements can be more or less noticeable but that affects the result of the WCAG contrast tests](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/problematic-colors.png)\n\nWe found an interesting take on this at the [Google Design](https://design.google/)\nwebsite, which intentionally uses colors that at least pass **AA for large\ntext**:\n\n> Due to this site’s purpose being a source for visual design reference\n> and inspiration, we felt it was acceptable not to target a stronger color\n> contrast level. — [Behind the Code — Google Slash Design Accessibility](http://www.instrument.com/articles/google-slash-design-accessibility)\n\nConsidering our audience and user base, should we be rigid and enforce **AA\nlevel for small text**? As a first step towards better color contrasts, we\ndecided to set our minimum at **AA for large text**, even for _small text_. For\ngrays, we [tested and tweaked their contrast against light gray backgrounds][ce#36675],\nas that is a common color used to differentiate regions in the UI.\n\n{: .text-center}\n![All tints and shades with corresponding WCAG levels, including grays](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/tints-shades-wcag.png){:style=\"width:567px\"}\n\n## Color priorities\n\nSo, after all this work, we introduced a wide range of color tints and shades\nwith the new color palette. The problem was that there was no guidance for using\nthem. Some color decisions are fairly quick and intuitive, but we wanted to\nstandardize and make the color selection process as objective as possible for\neveryone, even developers. We want to give people the chance to make a decision\nwithout imposing approval or reviews by the UX team. We want to be [lean, efficient, and focus on results](https://handbook.gitlab.com/handbook/values/).\n\nSome questions that we should be able to answer:\n\n- “I need to use one blue, which shade should I pick?”\n- “This UI component needs three contrasting shades of green. Can I pick\n  whichever I want?”\n\nThe [Material Design colors](https://material.io/guidelines/style/color.html)\nhave been a great source of inspiration for us. They follow the numeric naming\nconventions used by the [CSS `font-weight` property](https://www.w3.org/TR/css-fonts-3/#font-weight-prop),\nwhere a higher value equals a higher degree of blackness. 
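\n\nIn `variables.scss` terms, the convention looks roughly like the sketch below; the hex values here are illustrative placeholders, not our actual palette:\n\n```scss\n// One hue expressed as a numeric scale, lightest to darkest.\n$blue-50:  #f6fafd;\n$blue-300: #73afea;\n$blue-500: #1f78d1; // the default shade for this hue\n$blue-700: #17609a;\n$blue-950: #0a2744;\n```\n\n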
Following that convention, we’ve named our colors from the lightest (**50**) to the darkest (**950**).\n\nOn top of this naming scheme, we’ve defined a system of color priorities. This\nis similar to how different font weights are used to create contrasting\ntypography that communicates hierarchy.\n\nWe can apply this same logic to colors, as seen in the image below, by tagging\nthem according to their priority: from **1** to **4**. If you need guidance, the\npriorities can help you make better choices. When choosing how to apply color to\na UI component:\n\n- You start at priority **1**, which is the medium weight **500**. There’s only\n  one shade with priority 1 per color (the “default” shade).\n- For more shades of the same color, you could then choose from the next\n  priority level, number **2**, which can either be **300** (lighter) or **700**\n  (darker). And so forth for even lighter or darker shades.\n\n![All tints and shades with corresponding priorities, names, and WCAG levels, including grays](https://about.gitlab.com/images/blogimages/polishing-gitlabs-ui-a-new-color-system/color-priorities-system.png)\n\n## What’s next\n\nAlong the way, we’ve learned that [mixing colors and defining color palettes](https://books.google.com/books?id=R4qwDQAAQBAJ)\nis neither pure science nor pure art; it’s a subjective balance struck in the\nhuman mind. Color harmony depends on many factors, like culture, age, social\nstatus, or even the [designer’s intent](http://www.aic-color.org/journal/v1/jaic_v1_review.pdf).\n\nWe’ll have to see how people use the 11 tints and shades and how they’re applied\nin our [Design System][ds]. This is a constant evolution, and we’re always\niterating (as we should be).\n\nNext, we’re going to review our [color meaning guidelines](https://design.gitlab.com/)\nand be more active in their usage, not only in the product but also in our\n[Design System][ds] and [pattern library](https://gitlab.com/gitlab-org/gitlab-design/blob/master/gitlab-elements.sketch).\n\nA new color palette and a color priority system are seemingly small steps\ntowards a better user experience throughout GitLab, but they do make a big\ndifference, for our users, our team, and every contributor. 
This is the first\ninitiative to polish our UI styles; next, we’re implementing our new [type scale](https://gitlab.com/gitlab-org/gitlab-ce/issues/24310)\n– which will deserve a dedicated blog post.\n\nIf you have any questions, feel free to [post a comment on the community forum](https://forum.gitlab.com/new-topic?tags=blog-feedback),\n[tweet at us](https://twitter.com/gitlab), or join the discussion on the\nfollowing issues:\n\n- [Change chromatic/full colors to a more harmonious palette][ce#28614]\n- [Define color priorities][ce#31094]\n- [Define a pure gray color scale][ce#36675]\n",[915,959,1144,4300],{"slug":7557,"featured":6,"template":678},"polishing-gitlabs-ui-a-new-color-system","content:en-us:blog:polishing-gitlabs-ui-a-new-color-system.yml","Polishing Gitlabs Ui A New Color System","en-us/blog/polishing-gitlabs-ui-a-new-color-system.yml","en-us/blog/polishing-gitlabs-ui-a-new-color-system",{"_path":7563,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7564,"content":7570,"config":7576,"_id":7578,"_type":16,"title":7579,"_source":17,"_file":7580,"_stem":7581,"_extension":20},"/en-us/blog/use-cases-for-epics",{"title":7565,"description":7566,"ogTitle":7565,"ogDescription":7566,"noIndex":6,"ogImage":7567,"ogUrl":7568,"ogSiteName":692,"ogType":693,"canonicalUrls":7568,"schema":7569},"How the GitLab UX team uses epics","UX Manager Sarrah Vesselov shares how the UX team is using epics to manage their workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680187/Blog/Hero%20Images/how-ux-team-uses-epics.jpg","https://about.gitlab.com/blog/use-cases-for-epics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How the GitLab UX team uses epics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarrah Vesselov\"}],\n        \"datePublished\": \"2018-03-19\",\n      }",{"title":7565,"description":7566,"authors":7571,"heroImage":7567,"date":7573,"body":7574,"category":14,"tags":7575},[7572],"Sarrah Vesselov","2018-03-19","\n\nOne of the challenges for UX here at GitLab is how to work iteratively, making the smallest changes possible, while maintaining a holistic view of the application. As the manager for the UX department, I was curious to see how we could use [epics](https://docs.gitlab.com/ee/user/group/epics/) to better plan and track UX efforts over time.\n\n\u003C!-- more -->\n\n## What are epics?\n\nThe term 'epic' is most commonly associated with Agile methodology. In Agile, an epic is a collection of user stories that describe a larger user flow, typically consisting of multiple features. So, what does ‘epic’ mean at GitLab? Here, epics contain a title and description, much like an issue, and allow you to attach multiple child issues to indicate hierarchy. In short, an epic is a feature that allows you to manage a portfolio of projects more efficiently and with less effort by tracking groups of issues that share a theme, across projects and milestones.\n\nWhat this meant for the UX team was that we finally had an efficient way to plan, track, and execute a group of thematically related issues. Take the merge request page, for example. We have over 100 issues related to UX improvements for this feature alone! Each issue, taken on its own, represents just one piece of a much bigger picture. 
Epics would allow us to define the goal we have for the entire page and organize issues specific to that effort.\n\n## Getting started with epics\n\nTo get started with epics, we put together a UX strategy template. This template would be filled out and added to the epic description. The template defined the following:\n\n- **Challenges:** What user problem are we trying to solve? What business problem are we trying to solve? Are there obstacles standing in the way?\n\n- **Vision:** What do we want to achieve?\n\n- **Focus Areas:** What will we focus our attention on to have the most impact?\n\n- **Mission:** How will we achieve this goal?\n\n- **Activity/Deliverables:** What will we do and what will we deliver?\n\n- **Measure:** How will we measure success qualitatively and quantitatively?\n\nThe template also includes links to any relevant [personas](/blog/discovering-gitlabs-personas/) and [research](/blog/conducting-remote-ux-research/) we should consider when working toward the overall goal.\n\n## Creating our first epic\n\nWith the template ready to go, we chose the merge request page as our first area of focus. We started by reviewing the existing UX research for this page. It was essential to use data to understand the pain points and opportunities. We also examined the entire backlog of issues related to this page, matching existing issues to the research findings. With the significant pain points identified, we were able to fill out the template and create our very first epic.\n\n![Merge Request Epic](https://about.gitlab.com/images/blogimages/epics-ux.png){: .shadow}\n\nWith a holistic view of what we wanted to achieve, we could go back and find issues in the backlog that were critical to the vision. These issues were added to the epic and ordered according to priority. As we discover new information, we can reorder these issues to match the change in priority. As the scope expands, we can aggressively break things out into new epics for development at a later time or in parallel with the existing epic. In the future, [sub-epics](https://gitlab.com/gitlab-org/gitlab-ee/issues/4282) will make this process even more fluid.\n\n![Merge Request Epic Issues](https://about.gitlab.com/images/blogimages/epic-ux-issues.png){: .shadow}\n\n*\u003Csmall>Issues are listed under the epic description. They can be easily reordered by dragging and dropping them into place.\u003C/small>*\n\nWe also set a timeframe for this overall effort. Having a set timeframe allows us to plan resourcing with the product team and make adjustments accordingly.\n\n## Looking ahead\n\nSo far, epics have proven to be well suited for planning long-term UX efforts. They have allowed us to maintain a holistic view of a product area while still working iteratively. Epics also give other departments better visibility into what UX considers important. We are already looking beyond the merge request page and using epics to plan other efforts spanning multiple milestones. Epics are still relatively new, and there are many additions yet to come. 
In future releases, they will support [labeling](https://gitlab.com/gitlab-org/gitlab-ee/issues/4032), [discussions](https://gitlab.com/gitlab-org/gitlab-ee/issues/3889), [project-level epics](https://gitlab.com/gitlab-org/gitlab-ee/issues/4019), and integration with [issues](https://gitlab.com/gitlab-org/gitlab-ee/issues/4684) and [roadmaps](https://gitlab.com/gitlab-org/gitlab-ee/issues/3559).\n\n![Roadmap feature for epics](https://about.gitlab.com/images/blogimages/roadmaps.png){: .shadow}\n\nThe [Roadmap feature](https://gitlab.com/gitlab-org/gitlab-ee/issues/3559), pictured above, is set to be released in 10.5. Roadmaps offer a graphical, high-level overview of the goals and deliverables of one or more epics, presented on a timeline. The blue roadmap bar and the epic list item are clickable and will navigate to that epic's detail page.\n\n## Resources\n- [Portfolio Management Roadmap](/direction/#portfolio-management-and-issue-management )\n\nPhoto by [Dmitri Popov](https://unsplash.com/) on [Unsplash](https://unsplash.com/search/photos/scale?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1646,727,1347,1144],{"slug":7577,"featured":6,"template":678},"use-cases-for-epics","content:en-us:blog:use-cases-for-epics.yml","Use Cases For Epics","en-us/blog/use-cases-for-epics.yml","en-us/blog/use-cases-for-epics",{"_path":7583,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7584,"content":7590,"config":7595,"_id":7597,"_type":16,"title":7598,"_source":17,"_file":7599,"_stem":7600,"_extension":20},"/en-us/blog/the-on-call-handover-at-gitlab",{"title":7585,"description":7586,"ogTitle":7585,"ogDescription":7586,"noIndex":6,"ogImage":7587,"ogUrl":7588,"ogSiteName":692,"ogType":693,"canonicalUrls":7588,"schema":7589},"How our production team runs the weekly on-call handover","Senior Production Engineer John Jarvis explains our handover process for on-call incidents in a fully remote and distributed team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678661/Blog/Hero%20Images/production-on-call-handover.jpg","https://about.gitlab.com/blog/the-on-call-handover-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How our production team runs the weekly on-call handover\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jarvis\"}],\n        \"datePublished\": \"2018-03-14\",\n      }",{"title":7585,"description":7586,"authors":7591,"heroImage":7587,"date":7592,"body":7593,"category":14,"tags":7594},[4885],"2018-03-14","\nHow do you manage on-call incidents among a team of eight distributed across three time zones?\nEvery week, production engineers are assigned to the role of handling on-call.\nWith this comes the [expectation][on-call-expectations] of being available to\nrespond to any issue that results in a critical alert. Additionally,\non-call individuals act as an umbrella for\nother members of the team by triaging and handling all issues\nrelated to GitLab.com infrastructure.\n\n\u003C!-- more -->\n\nThe production team structures on-call shifts so that they follow the sun, to\navoid waking up members of the team in the middle of the night.\nThis works well for GitLab's [remote-only culture](/company/culture/all-remote/) where there are engineers in multiple\ntime zones. 
Occasionally, an on-call engineer will need to respond to an issue\noutside normal working hours; in these situations, GitLab encourages members to take\n[time off][on-call-time-off] after their shift to recover.\n\n## The on-call handover\n\nAs the team members working on-call shifts are distributed and their working hours don't always overlap, you can see how it would be easy for things to slip through the cracks between one shift and the next. To prevent this from happening, once a week, the production team holds a 30-minute meeting called the [on-call handover][on-call-handover].\nOne of the key tenets of GitLab is that [everything starts with an issue][start-with-an-issue], and\nthe on-call handover is no exception!\nFrom a generated report, the team reviews incidents that occurred during the\nlast seven days and decides whether they need additional attention or escalation.\n\nAfter that, we check all GitLab issues with the on-call label to see if there are\nany that need to move from the current shift to the next one. At the end, there\nis a brief review of seven-day graphs. These help us keep an eye out for anything\nanomalous in our key metrics. If there is anything that seems\nout of the ordinary or warrants further investigation, the team will dig into it to see if we can\nidentify the root cause. The production team at GitLab encourages leads of other\ngroups to attend the review, as this helps bring to our attention any high-priority\nitems specific to individual services.\n\n## Automating the on-call handover\n\nDrinking our own wine by using GitLab for on-call report generation has proven to\nbe a good way to automate some of the more tedious work of the handover.\nTo aid with this, the production team developed a program\ncalled the [on-call robot assistant][on-call-robot-assistant]. It pulls data\nfrom relevant sources such as PagerDuty, Grafana and GitLab itself to generate a\nreport in a GitLab issue.\n\nThe program automates the following tasks:\n\n* Pulling the last shift's incidents from PagerDuty\n* Generating issue stats from the [production backlog][production-backlog]\n* Displaying seven-day graphs for the key performance metrics we track,\n  sourced from [GitLab Prometheus][gitlab-prometheus] monitoring\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/oncall-robot-tty.gif\" alt=\"oncall-tty\" class= \"shadow\" style=\"width: 600px;\"/>\u003C/center>\n\n*\u003Csmall>Generating an on-call report in a GitLab issue\u003C/small>*\n\nThese data sources are set in a [simple configuration file][ocr-config], making it\neasy to iterate as we add new metrics to monitor.\nAt GitLab, most of what we do is out in the open, so our on-call handover reports are\navailable for anyone to check out. 
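\n\nTo give a flavor of that configuration file, a hypothetical settings snippet might look like the sketch below; every name and URL here is illustrative, so see the example settings file linked above for the real shape:\n\n```yaml\n# Illustrative only: where the robot pulls its report data from.\npagerduty:\n  schedule: production            # which rotation's incidents to fetch\ngrafana:\n  base_url: https://dashboards.example.com\n  panels:                         # seven-day graphs embedded in the report\n    - key-metrics/error-rate\n    - key-metrics/p99-latency\ngitlab:\n  project: gitlab-com/infrastructure\n  label: oncall                   # issues to hand over to the next shift\n```\n\n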
If you want to see previous reports from\nthe on-call handovers, [check them out in our issue tracker][on-call-reports].\n\nFor example, here is a recent report for one week's shift:\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/oncall-robot-report1.png\" alt=\"oncall-report1\" class= \"shadow\" style=\"width: 600px;\"/>\u003C/center>\n\nAlong with some graphs for key metrics the production team is monitoring:\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/oncall-robot-report2.png\" alt=\"oncall-report2\" class= \"shadow\" style=\"width: 600px;\"/>\u003C/center>\n\nWhen the team is finished reviewing the report, the current on-call engineer closes it\nand the shift officially ends.\n\n[Photo](https://unsplash.com/photos/ocs8x33bpMA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Denny Müller on [Unsplash](https://unsplash.com/search/photos/telephone?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n\u003C!-- identifiers -->\n\n[on-call-expectations]: /handbook/on-call/#expectations-for-on-call\n[on-call-time-off]: /handbook/paid-time-off/#a-gitlabbers-guide-to-time-off\n[start-with-an-issue]: /handbook/communication/#everything-starts-with-an-issue\n[on-call-robot-assistant]: https://gitlab.com/gl-infra/oncall-robot-assistant\n[production-backlog]: https://gitlab.com/gitlab-com/infrastructure/issues\n[gitlab-prometheus]: https://docs.gitlab.com/ee/administration/monitoring/prometheus/\n[ocr-config]: https://gitlab.com/gl-infra/oncall-robot-assistant/blob/master/oncall-settings-example.yaml\n[on-call-reports]: https://gitlab.com/gitlab-com/infrastructure/issues?scope=all&utf8=%E2%9C%93&state=closed&label_name[]=oncall%20report\n[on-call-report-example]: https://gitlab.com/gitlab-com/infrastructure/issues/3583\n[on-call-handover]: /handbook/engineering/infrastructure/team/reliability/on-call-handover/\n",[915,1286],{"slug":7596,"featured":6,"template":678},"the-on-call-handover-at-gitlab","content:en-us:blog:the-on-call-handover-at-gitlab.yml","The On Call Handover At Gitlab","en-us/blog/the-on-call-handover-at-gitlab.yml","en-us/blog/the-on-call-handover-at-gitlab",{"_path":7602,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7603,"content":7609,"config":7614,"_id":7616,"_type":16,"title":7617,"_source":17,"_file":7618,"_stem":7619,"_extension":20},"/en-us/blog/gitlab-vscode-extension",{"title":7604,"description":7605,"ogTitle":7604,"ogDescription":7605,"noIndex":6,"ogImage":7606,"ogUrl":7607,"ogSiteName":692,"ogType":693,"canonicalUrls":7607,"schema":7608},"A VS Code extension for GitLab: GitLab Workflow","Senior Frontend Engineer Fatih Acet created a VS Code extension, GitLab Workflow, which allows you to do many GitLab-specific tasks quickly and easily.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680196/Blog/Hero%20Images/vs-code-extension-gitlab-workflow.jpg","https://about.gitlab.com/blog/gitlab-vscode-extension","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A VS Code extension for GitLab: GitLab Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatih Acet\"}],\n        \"datePublished\": \"2018-03-01\",\n      }",{"title":7604,"description":7605,"authors":7610,"heroImage":7606,"date":7611,"body":7612,"category":14,"tags":7613},[6394],"2018-03-01","\n\nWe recently did a survey within the Frontend team to see which tools we were using and how we were using them, in order to 
learn from one another and to build better development workflows. Through this survey, we determined that [Visual Studio Code (VS Code)](https://code.visualstudio.com/) is the most used integrated development environment (IDE) within the team. This led to the idea for a GitLab extension for VS Code that could help reduce context switching and boost productivity.\n\nUpdate: Read [eight tips for using the GitLab VS Code extension](https://about.gitlab.com/blog/vscode-workflows-for-working-with-gitlab/) and about [how GitLab + VS Code can be used for extension development](/blog/vscode-extension-development-with-gitlab/).\n{: .alert .alert-info .text-center}\n\nThis is not a [GitLab feature](/pricing/feature-comparison/) (we're actually working on building our own integrated [web IDE](https://docs.gitlab.com/ee/user/project/web_ide/)), but the extension is a quick and easy way to perform, directly within your VS Code editor, a lot of useful actions you would usually visit [GitLab.com](https://gitlab.com/) to do. Watch the demo below and read on for more about how I developed the extension.\n\n## Demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XcxsF0lWBhA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## First iteration of GitLab Workflow\n\nThis was my first attempt at writing a VS Code extension, and I wanted to build something simple as a first iteration. I built an extension that allowed users to see issues and merge requests assigned to them on GitLab.com. The detailed documentation and powerful APIs of VS Code enabled me to build my first version in less than two hours! It was an enjoyable experience.\n\n## Further iterations\n\nThis led to my second iteration: showing MR URLs, providing the pipeline status on the status bar, and opening the current file and the current MR on GitLab.com. I shared this second iteration with my fellow GitLab team-members on our internal Slack and received a lot of positive feedback. After that, I released new iterations, and the extension got more than 5,000 installations in just a month. It was so well received that it was featured in the \"Trending this week\" section of the Visual Studio Marketplace and is currently featured in the \"Trending this month\" section 🎉\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/gitlab-vscode-extension/trending-this-month.png\" alt=\"GitLab Workflow on Visual Studio Marketplace\" style=\"width: 700px;\"/>\u003C/center>{: .shadow}\n\nThe current version of this extension allows you to:\n\n- See the pipeline status, plus open-MR and closing-issue links, in the status bar. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#status-bar).\n- Automatically update the pipeline status on the status bar so you don't need to open GitLab to check it.\n- Use advanced pipeline actions to view a pipeline on GitLab, create a new pipeline, and retry or cancel the current pipeline. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#pipeline-actions).\n- Search issues and MRs, including simple and advanced search. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#advanced-search).\n- View an MR and close an issue on GitLab with a single click from your status bar.\n- View the active file on GitLab, with the active line number and selected text block highlighted. 
[Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#open-active-file).\n- Create public, internal or private snippet from entire file or selection. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#create-snippet).\n- Compare your branch with master and view changes on GitLab. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#compare-with-master).\n- Validate GitLab CI configuration file `.gitlab-ci.yml`. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#validate-gitlab-ci-configuration).\n\nSee below for more tasks you can perform quickly with the extension.\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/gitlab-vscode-extension/gitlab-vscode.png\" alt=\"GitLab Workflow Commands\" style=\"width: 700px;\"/>\u003C/center>{: .shadow}\n\nYou can find the source code [here](https://gitlab.com/fatihacet/gitlab-vscode-extension) and see the extension [on the Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=fatihacet.gitlab-workflow). You can read the documentation [here](https://docs.gitlab.com/ee/user/project/repository/vscode.html) and check the CHANGELOG [here](https://gitlab.com/fatihacet/gitlab-vscode-extension/blob/master/CHANGELOG.md). There is also a [Product Hunt page](https://www.producthunt.com/posts/gitlab-workflow) for the extension.\n\nPhoto by [Iker Urteaga](https://unsplash.com/photos/TL5Vy1IM-uA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/tools?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[2932,703,232],{"slug":7615,"featured":6,"template":678},"gitlab-vscode-extension","content:en-us:blog:gitlab-vscode-extension.yml","Gitlab Vscode Extension","en-us/blog/gitlab-vscode-extension.yml","en-us/blog/gitlab-vscode-extension",{"_path":7621,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7622,"content":7628,"config":7634,"_id":7636,"_type":16,"title":7637,"_source":17,"_file":7638,"_stem":7639,"_extension":20},"/en-us/blog/how-we-added-eslint-into-vue",{"title":7623,"description":7624,"ogTitle":7623,"ogDescription":7624,"noIndex":6,"ogImage":7625,"ogUrl":7626,"ogSiteName":692,"ogType":693,"canonicalUrls":7626,"schema":7627},"How eslint-plugin-vue improved our code reviews","A few months ago we felt the need to build a style guide for Vue and now are using eslint-vue-plugin, which is saving us time in our code reviews.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680227/Blog/Hero%20Images/code_cover_image.jpg","https://about.gitlab.com/blog/how-we-added-eslint-into-vue","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How eslint-plugin-vue improved our code reviews\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Filipa Lacerda\"}],\n        \"datePublished\": \"2018-02-13\",\n      }",{"title":7623,"description":7624,"authors":7629,"heroImage":7625,"date":7631,"body":7632,"category":14,"tags":7633},[7630],"Filipa Lacerda","2018-02-13","\n\nWe've (finally) integrated [eslint-plugin-vue](https://github.com/vuejs/eslint-plugin-vue) successfully into our codebase!\n\n\u003C!-- more -->\n\nWhen we [added Vue](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5845) to our codebase back in April 2016, [eslint-plugin-vue](https://github.com/vuejs/eslint-plugin-vue) did not yet 
[exist](https://github.com/vuejs/eslint-plugin-vue/commit/6a3a6db540e823b51af1e02950896ac9c2b49219) and we had not yet started using [eslint](https://eslint.org/) at all.\n\nOne of the things I love the most about GitLab being an open source tool is that anyone can contribute! [Winnie Hellmann](https://gitlab.com/winh), who has since joined the team, did this amazing work [adding eslint](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5445) as a community contribution. Thanks Winnie! 🙇‍\n\n## The start of a style guide\n\nAs our Vue codebase grew from a few features to quite a large usage ([issue boards](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5554), [environments](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8954), [cycle analytics](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/7366), [pipelines](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/10878)), we noticed that each of our Vue apps followed a different style. At that time we felt the need to [document how to architect a Vue application](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8866) to ensure a consistent codebase. Once we defined and documented how to use the component system and Flux architecture [with our codebase](https://docs.gitlab.com/ee/development/fe_guide/vue.html#vue-architecture), we noticed that our Vue code also differed in very small things, such as indentation or the order in which we declared methods. These inconsistencies, although small, increased the complexity both of the review process and of maintaining a healthy codebase.\n\nWith the goal of decreasing the time we spent reviewing Vue code and debating each of these aspects, and because at the time there wasn't an official Vue style guide, [we started our own](https://gitlab.com/gitlab-org/gitlab-ce/commit/8c3bdc853a5237a3bef6e26fcf22132db7e8bd9c)! You can check out our documentation [here](https://docs.gitlab.com/ee/development/fe_guide/style_guide_js.html#vue-js). As the Vue community grew, the need for an official style guide and for an eslint plugin for Vue grew with it. Thanks to the wonderful team of [Michał Sajnóg](https://github.com/michalsnik), [Toru Nagashima](https://github.com/mysticatea), [Armano](https://github.com/armano2) and [Chris Fritz](https://github.com/chrisvfritz) leading the development of such a tool, we are now able to use it in production! And we even got to act as a source of [inspiration for the official one](https://github.com/vuejs/eslint-plugin-vue/issues/77#issuecomment-315834845) ❤\n\n## Adding eslint-plugin-vue\n\nAfter [waiting a couple of months](https://gitlab.com/gitlab-org/gitlab-ce/issues/34312) for a stable version of [eslint-plugin-vue](https://github.com/vuejs/eslint-plugin-vue), we finally gave it a try once version [4.0.0](https://github.com/vuejs/eslint-plugin-vue/releases/tag/v4.0.0) was released.\n\n![EE Conflicts](https://about.gitlab.com/images/eslint-vue-plugin/eslint-conflicts-team-help.png \"EE Conflicts\"){: .shadow}\n\n*\u003Csmall>Frontend team working together to resolve all the Vue eslint problems\u003C/small>*\n\nIt took a couple of days to fix all the problems eslint identified in our code, but we were able to add it successfully, and thanks to a huge team effort, the second row of conflicts was resolved very quickly. Thanks again Luke, Eric, Kushal and José!\n\nNow our review process is even faster: we don't have to manually check for the style guide rules anymore! 
🎉\n\n[Cover image](https://pixabay.com/en/computer-computer-code-screen-1209641/) by [Free-Photos](https://pixabay.com/en/users/Free-Photos-242387/) is licensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[915,1979],{"slug":7635,"featured":6,"template":678},"how-we-added-eslint-into-vue","content:en-us:blog:how-we-added-eslint-into-vue.yml","How We Added Eslint Into Vue","en-us/blog/how-we-added-eslint-into-vue.yml","en-us/blog/how-we-added-eslint-into-vue",{"_path":7641,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7642,"content":7648,"config":7654,"_id":7656,"_type":16,"title":7657,"_source":17,"_file":7658,"_stem":7659,"_extension":20},"/en-us/blog/crowdin-localization-for-agile-projects",{"title":7643,"description":7644,"ogTitle":7643,"ogDescription":7644,"noIndex":6,"ogImage":7645,"ogUrl":7646,"ogSiteName":692,"ogType":693,"canonicalUrls":7646,"schema":7647},"Automate your localization with GitLab + Crowdin","Complete your development workflow by integrating GitLab with Crowdin to help your product speak the same language as the people it’s built for.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680247/Blog/Hero%20Images/gitlab-crowdin-cover.png","https://about.gitlab.com/blog/crowdin-localization-for-agile-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automate your localization with GitLab + Crowdin\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Khrystyna Humenna\"}],\n        \"datePublished\": \"2018-02-06\",\n      }",{"title":7643,"description":7644,"authors":7649,"heroImage":7645,"date":7651,"body":7652,"category":14,"tags":7653},[7650],"Khrystyna Humenna","2018-02-06","\n\nWhen developing products like web apps, games, and the like, you have to face the fact that you enter the international market the moment your product is first mentioned on the web. Once you decide to promote your product internationally and expand its reach, you should add localization to your workflow. Crowdin's integration with GitLab means you can seamlessly automate your localization process.\n\n\u003C!-- more -->\n\nA developer needs roughly 15 minutes to add some new text. But if a product is to be translated into, say, 10 languages, deployment is delayed by at least a week, because after the code is built, translators need time to produce the translations. If several developers work on product updates simultaneously, translations for those would delay each deployment even more. In this scenario, any team is quite unlikely to stay Agile.\n\n## Do localization in an Agile way with the GitLab + Crowdin integration\n\nTo be able to constantly ship minimum viable changes, localization should be a part of the development process. Crowdin is a localization management platform that completes your workflow by synchronizing translatable and translated files between your GitLab repository and your Crowdin localization project. This way, multiple translators and developers can work simultaneously to deliver great results in less time.\n\n### Integrate Crowdin with your repository\n\nFirst of all, log into [crowdin.com](https://crowdin.com/) (you can use your GitLab account for this as well), then create a localization project or integrate an existing one. 
In the Crowdin project settings, you will be able to set up this integration and define whether the translatable texts should be uploaded to Crowdin from the master branch or from the development branches.\n\nThen select the file path for translations: once translations are made in Crowdin, they will be automatically added to your GitLab repository in a merge request. Each time the automatic file sync is completed, the merge request in GitLab will be updated with new translations, or a new merge request will be created if the previous one was already merged.\n\nThis allows you to review translations before merging them to master and receive up-to-date translations a few minutes after they are made, as the file sync is completed automatically. Read more details on [how to set up the GitLab + Crowdin integration](https://support.crowdin.com/gitlab-integration/).\n\n\u003Cimg src=\"/images/blogimages/gitlab_crowdin_integration.png\" alt=\"GitLab Crowdin integration\" style=\"width: 700px;\"/>{: .shadow}\n\n*\u003Csmall>A view of the integration with GitLab in Crowdin during automated synchronization.\u003C/small>*\n\n\n### Work with Agile translators\n\nWhether you decide to translate your project with the help of in-house translators or a translation agency, they should be Agile so they can make translations of different scope at any time, not just one project at a time.\n\nCrowdin project notification settings allow you to notify translators and other project members every time new texts are added to the project. This way, they’ll be able to start making translations once new texts are synchronized with a project in Crowdin. You, in turn, will be able to keep an eye on their contributions and overall project activity using Crowdin project reports.\n\n\u003Cimg src=\"/images/blogimages/crowdin_project_reports.png\" alt=\"Crowdin project reports\" style=\"width: 700px;\"/>{: .shadow}\n\n*\u003Csmall>Use project reports in Crowdin to easily track the main activities such as translations and approvals.\u003C/small>*\n\n## Key points to remember about localization\n\n### Localization is a continuous process\n\nIf your product changes and evolves often, you should keep the localized versions up to date as well. Each time you add some new functionality, a scheduled update, or a small change, the new texts should be localized as well.\n\n### The product’s UI should be flexible\n\nMake sure to use responsive design, as the same phrase in different languages might take up more or less space than it does in the primary language of your product.\n\n### Consistency is important\n\nCreating a style guide and a glossary, and using a Translation Memory, is a great idea if you want to speed up the translation process and receive consistent translations at the same time.\n\nCrowdin comes with a free 10-day trial and is free of charge for open source projects. 
[Give it a try!](https://crowdin.com/join)\n",[232],{"slug":7655,"featured":6,"template":678},"crowdin-localization-for-agile-projects","content:en-us:blog:crowdin-localization-for-agile-projects.yml","Crowdin Localization For Agile Projects","en-us/blog/crowdin-localization-for-agile-projects.yml","en-us/blog/crowdin-localization-for-agile-projects",{"_path":7661,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7662,"content":7668,"config":7674,"_id":7676,"_type":16,"title":7677,"_source":17,"_file":7678,"_stem":7679,"_extension":20},"/en-us/blog/test-all-the-things-gitlab-ci-docker-examples",{"title":7663,"description":7664,"ogTitle":7663,"ogDescription":7664,"noIndex":6,"ogImage":7665,"ogUrl":7666,"ogSiteName":692,"ogType":693,"canonicalUrls":7666,"schema":7667},"Test all the things in GitLab CI with Docker by example","Running tests is easier than you think – guest author Gabriel Le Breton shares his presentation about testing everything automatically with GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680261/Blog/Hero%20Images/test-all-the-things-in-gitlab-ci-with-docker-by-example.jpg","https://about.gitlab.com/blog/test-all-the-things-gitlab-ci-docker-examples","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Test all the things in GitLab CI with Docker by example\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Le Breton\"}],\n        \"datePublished\": \"2018-02-05\",\n      }",{"title":7663,"description":7664,"authors":7669,"heroImage":7665,"date":7671,"body":7672,"category":14,"tags":7673},[7670],"Gabriel Le Breton","2018-02-05","\n\nDo you write tests? Or do you skip them because it’s too complicated to run? Or maybe developers on your team just don’t care? You should take a few minutes and set up CI so you can enforce good practices. Good news, you can test [all the things](http://knowyourmeme.com/memes/all-the-things) automagically in [GitLab CI/CD](/solutions/continuous-integration/) with Docker and very little effort 🤘\n\n\u003C!-- more -->\n\nI recently gave a presentation at the [SagLacIO](http://saglac.io/) about [GitLab CI/CD](/solutions/continuous-integration/).\n\n## Getting started\n\nFirst, you’ll need an account at [GitLab.com](https://gitlab.com/). If you don’t already have one, you can open an account with no problem. [GitLab’s free tier](/stages-devops-lifecycle/) gives you a ton of features, unlimited free hosted repositories, 2,000 CI build minutes per month, etc. 
You can even use your own task runners in case you bust that limit.\n\n### Useful links\n\n- [GitLab.com](https://gitlab.com/)\n- [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/) 📗\n- [.gitlab-ci.yml documentation](https://docs.gitlab.com/ee/ci/yaml/) 📕\n- [.gitlab-ci.yml linter: gitlab.com/ci/lint](https://gitlab.com/ci/lint/) ✅\n- [gitlab-ci nodejs example project](https://gitlab.com/gableroux/gitlab-ci-example-nodejs)\n- [gitlab-ci Docker example project](https://gitlab.com/gableroux/gitlab-ci-example-docker)\n- [gitlab-ci django example project](https://gitlab.com/gableroux/gitlab-ci-example-django)\n- [Unity3D Docker project](https://gitlab.com/gableroux/unity3d) running in gitlab-ci and published to [Docker Hub](https://hub.docker.com/r/gableroux/unity3d/)\n- [How to publish Docker images to Docker Hub from gitlab-ci on Stack Overflow](https://stackoverflow.com/questions/45517733/how-to-publish-docker-images-to-docker-hub-from-gitlab-ci)\n\n## Here go the slides\n\nScroll through the slides from my presentation on GitLab CI/CD at SagLacIO, you’ll have fun 🤘\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://docs.google.com/presentation/d/10835yig54EbR_OQcxSXURkPk_0zkhLxaWHdRdXb-yWw/embed?start=false&amp;loop=false&amp;delayms=3000\" frameborder=\"0\" width=\"1280\" height=\"749\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\nIf you have suggestions, feel free to poke me or [open an issue](https://github.com/GabLeRoux/gableroux.github.io/issues).\n\n *[Test all the things in GitLab CI with Docker by example](https://gableroux.com/saglacio/2018/01/16/test-all-the-things-in-gitlab-ci-with-docker-by-example/) was originally published on gableroux.com.*\n\n *Cover photo by [Federico Beccari](https://unsplash.com/photos/ahi73ZN5P0Y?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)*\n {: .note}\n",[4440,110],{"slug":7675,"featured":6,"template":678},"test-all-the-things-gitlab-ci-docker-examples","content:en-us:blog:test-all-the-things-gitlab-ci-docker-examples.yml","Test All The Things Gitlab Ci Docker Examples","en-us/blog/test-all-the-things-gitlab-ci-docker-examples.yml","en-us/blog/test-all-the-things-gitlab-ci-docker-examples",{"_path":7681,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7682,"content":7687,"config":7692,"_id":7694,"_type":16,"title":7695,"_source":17,"_file":7696,"_stem":7697,"_extension":20},"/en-us/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x",{"title":7683,"description":7684,"ogTitle":7683,"ogDescription":7684,"noIndex":6,"ogImage":2284,"ogUrl":7685,"ogSiteName":692,"ogType":693,"canonicalUrls":7685,"schema":7686},"How a fix in Go 1.9 sped up our Gitaly service by 30x","After noticing a worrying pattern in Gitaly's performance, we uncovered an issue with fork locking affecting virtual memory size. 
Here's how we figured out the problem and how to fix it.","https://about.gitlab.com/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How a fix in Go 1.9 sped up our Gitaly service by 30x\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"}],\n        \"datePublished\": \"2018-01-23\",\n      }",{"title":7683,"description":7684,"authors":7688,"heroImage":2284,"date":7689,"body":7690,"category":14,"tags":7691},[1462],"2018-01-23","\n\n[Gitaly](https://gitlab.com/gitlab-org/gitaly) is a Git RPC service that we are currently rolling out\nacross GitLab.com to replace our legacy NFS-based file-sharing solution. We expect it to be faster, more stable,\nand the basis for amazing new features in the future.\n\nWe're still in the process of porting Git operations to Gitaly, but the service has been\nrunning in production on GitLab.com for about nine months, and currently peaks at about 1,000\n[gRPC](https://grpc.io/) requests per second. We expect the migration effort to be completed\nby the beginning of April, at which point all Git operations in the GitLab application will\nuse the service and we'll be able to decommission our NFS infrastructure.\n\n\u003C!-- more -->\n\n## Worrying performance improvements\n\nThe first time we realized that something might be wrong was shortly after we'd finished deploying a new release.\n\nWe were monitoring the performance of one of the gRPC endpoints for the Gitaly service and noticed that the\n99th percentile latency of the endpoint had dropped from 400ms down to 100ms.\n\n![400ms to 100ms latency drop](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-01.png){: .shadow.center}\nLatencies drop from 400ms to 100ms after a deploy, for no good reason\n{: .note .text-center}\n\nThis should have been fantastic news, but it wasn't. There were no changes that should have led to faster\nresponse times. We hadn't optimized anything in that release; we hadn't changed the runtime, and the new\nrelease was using the same version of Git.\n\nEverything _should have_ been exactly the same.\n\nWe started digging into the data a little more and quickly realized that 400ms is a very high latency for\nan operation that simply confirms the existence of a [Git reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References).\n\nHow long had it been this way? Well, it started about 24 hours after the previous deployment.\n\n![100ms to 400ms latency hike](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-02.png){: .shadow.center}\nLatencies rising over a 24-hour period following a deployment, for no good reason\n{: .note .text-center}\n\nWhen browsing our Prometheus performance data, it quickly became apparent that this pattern was repeating with each\ndeployment: things would start fast and gradually slow down. This was occurring across all endpoints, and it had been this way for a while.\n\nThe first assumption was that there was some sort of resource leak in the application, causing the host to slow\ndown over time. Unfortunately, the data didn't back this up. 
CPU usage of the Gitaly service did increase, but the\nhosts still had lots of capacity.\n\n![Gitaly CPU charts](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-03.png){: .shadow.center}\nGitaly CPU increasing with process age, but not enough to explain the problem\n{: .note .text-center}\n\nAt this point, we still didn't have any good leads as to the cause of the problem, so we decided to further\nimprove the observability of the application by adding [pprof profiling support](https://golang.org/pkg/net/http/pprof/)\nand [cAdvisor](https://github.com/google/cadvisor) metrics.\n\n## Profiling\n\nAdding pprof support to a Go process is [very easy](https://gitlab.com/gitlab-org/gitaly/merge_requests/442).\nThe process already had a Prometheus listener, and we added a pprof handler on the same listener.\n\nSince production teams would need to be able to perform the profiling without our assistance, we\nalso [added a runbook](https://gitlab.com/gitlab-com/runbooks/blob/master/howto/gitaly-profiling.md).\n\nGo's pprof support is easy to use, and in our testing we found that the overhead it\nadded to production workloads was negligible, meaning we could use it in production without concern\nabout the impact it would have on site performance.\n\n## cAdvisor\n\nThe Gitaly service spawns Git child processes for many of its endpoints. Unfortunately, these Git\nchild processes don't have the same instrumentation as the parent process, so it was\ndifficult to tell if they were contributing to the problem. (Note: we record [`getrlimit(2)`](http://man7.org/linux/man-pages/man2/getrlimit.2.html) metrics for Git processes but cannot observe grandchild processes spawned by Git, which often do much of the heavy lifting.)\n\nOn GitLab.com, Gitaly is managed through systemd, which will automatically create a cgroup for\neach service it manages.\n\nThis means that Gitaly and its child processes are contained within a single cgroup, which we\ncould monitor with [cAdvisor](https://github.com/google/cadvisor), a Google monitoring tool\nwhich supports cgroups and is compatible with Prometheus.\n\nAlthough we didn't have direct metrics to determine the behavior of the Git processes, we could\ninfer it using the cgroup metrics and the Gitaly process metrics: the difference between the\ntwo would tell us the resources (CPU, memory, etc.) being consumed by the Git child processes.\n\nAt our request, the production team [added cAdvisor to the Gitaly servers](https://gitlab.com/gitlab-com/infrastructure/issues/3307).\n\nHaving cAdvisor gives us the ability to know what the Gitaly service, including all its child\nprocesses, is doing.\n\n![cAdvisor graphs for the Gitaly cgroup](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-04.png){: .shadow.center}\ncAdvisor graphs of the Gitaly cgroup\n{: .note .text-center}\n\n## From bad to worse. Much, much worse...\n\nIn the meantime, **[the situation had got far worse](https://gitlab.com/gitlab-org/gitaly/issues/823)**.\n Instead of only seeing gradual latency increases over time, we were now seeing far more serious lockups.\n\nIndividual Gitaly server instances would grind to a halt, to the point where new incoming TCP connections\nwere no longer being accepted. This proved to be a problem for using pprof: during a lockup, the connection\nwould time out when attempting to profile the process. 
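\n\nThat is an inherent limitation of the way pprof is typically wired up: the profiling endpoints live inside the very process being profiled. A minimal sketch of that wiring (our setup differs in detail – the port here is a placeholder, and we assume the listener uses the default mux):\n\n```go\npackage main\n\nimport (\n    \"log\"\n    \"net/http\"\n    _ \"net/http/pprof\" // registers the /debug/pprof/* handlers on http.DefaultServeMux\n)\n\nfunc main() {\n    // The same in-process listener serves metrics and profiles alike,\n    // so a hung process takes its own profiler down with it.\n    log.Fatal(http.ListenAndServe(\":9090\", nil))\n}\n```\n\n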
Since the reason we added pprof was to observe the\nprocess under duress, that approach was a bust.\n\nInterestingly, during a lock-up, CPU would actually decrease – the system was not overloaded, but actually\n _idled_. IOPS, iowait and CPU would all drop way down.\n\nEventually, after a few minutes, the service would recover and there would be a surge in backlogged\nrequests. Usually though, as soon as the state was detected, the production team would restart the\nservice manually.\n\nThe team spent a significant amount of time trying to recreate the problem locally, with little success.\n\n## Forking locks\n\nWithout pprof, we fell back to [SIGABRT thread dumps](http://pro-tips-dot-com.tumblr.com/post/47677612115/kill-a-hung-go-process-and-print-stack-traces)\nof hung processes. Using these, we determined that the process had a large amount of contention around [`syscall.ForkLock`](https://gitlab.com/gitlab-org/gitaly/issues/823#note_50951140)\nduring the lockups. In one dump, 1,400 goroutines were blocked waiting on `ForkLock` – most for several minutes.\n\n`syscall.ForkLock` has [the following documentation](https://github.com/golang/go/blob/release-branch.go1.8/src/syscall/exec_unix.go#L17):\n\n> Lock synchronizing creation of new file descriptors with fork.\n\nEach Gitaly server instance was `fork/exec`'ing Git processes about 20 times per second, so we seemed to finally have a very promising lead.\n\n## Serendipity\n\n[Researching ForkLock](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/9365#note_54342481) led us to an issue on the Go repository,\nopened in 2013, about switching from `fork/exec` to [`clone(2)`](https://man7.org/linux/man-pages/man2/clone.2.html) with `CLONE_VFORK` and `CLONE_VM`\non systems that support it: [golang/go#5838](https://github.com/golang/go/issues/5838)\n\nThe `clone(2)` syscall with `CLONE_VFORK` and `CLONE_VM` is the same as\nthe [`posix_spawn(3)`](http://man7.org/linux/man-pages/man3/posix_spawn.3.html) C function, but the latter is easier to\nrefer to, so let's use that.\n\nWhen using `fork`, the child process starts with a copy of the parent process's memory.\nUnfortunately, this copy takes longer the larger the parent's virtual memory footprint is.\nEven with copy-on-write, it can take several hundred milliseconds in a memory-intensive process.\n`posix_spawn` doesn't copy the parent process's memory space, so it takes roughly constant time.\n\nSome good benchmarks of `fork/exec` vs. `posix_spawn` can be found here: [https://github.com/rtomayko/posix-spawn#benchmarks](https://github.com/rtomayko/posix-spawn#benchmarks)\n\nThis seemed like a possible explanation. Over time, the virtual memory size (VSZ) of the Gitaly process would increase. As VSZ\nincreased, each [`fork(2)`](http://man7.org/linux/man-pages/man2/fork.2.html) syscall would take longer. As fork latency increased, `syscall.ForkLock` contention would increase.\nIf the time taken by each `fork` grew beyond the interval between `fork` requests, the system could temporarily lock up entirely.\n\n(Interestingly, [`TCPListener.Accept`](https://golang.org/pkg/net/#TCPListener.Accept)\n[also interacts](https://github.com/golang/go/blob/2ea7d3461bb41d0ae12b56ee52d43314bcdb97f9/src/net/sock_cloexec.go#L20) with `syscall.ForkLock`,\nalthough only on older versions of Linux. 
Could this be the cause of our failure to connect to the pprof listener during a lockup?)\n\nBy some incredibly good luck, [golang/go#5838](https://github.com/golang/go/issues/5838), the switch from `fork` to `posix_spawn`, had,\nafter several years' delay, recently landed in Go 1.9, just in time for us. Gitaly had been compiled with Go 1.8.\n We quickly built and tested a new binary with Go 1.9 and manually deployed this\non one of our production servers.\n\n### Spectacular results\n\nHere's the CPU usage of Gitaly processes across the fleet:\n\n![CPU after Go 1.9](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-05.png){: .shadow.center}\nCPU after recompiling with Go 1.9\n{: .note .text-center}\n\nHere are the 99th percentile latency figures. This chart uses a logarithmic scale, so we're talking about two orders of\nmagnitude faster!\n\n![30x latency drops with Go 1.9](https://about.gitlab.com/images/blogimages/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x/graph-06.png){: .shadow.center}\nEndpoint latency after recompiling with Go 1.9 (log scale)\n{: .note .text-center}\n\n## Conclusion\n\nRecompiling with Go 1.9 solved the problem, thanks to the switch to `posix_spawn`. We learned several other lessons\nin the process too:\n\n1. Having solid application monitoring in place allowed us to detect this issue, and start investigating it, far\n   earlier than we otherwise would have been able to.\n1. [pprof](https://blog.golang.org/profiling-go-programs) can be really helpful, but may not help when a process\n   has locked up and won't accept new connections. pprof is lightweight enough that you should consider adding it to your application _before_ you need it.\n1. When all else fails, [`SIGABRT thread dumps`](http://pro-tips-dot-com.tumblr.com/post/47677612115/kill-a-hung-go-process-and-print-stack-traces) might help.\n1. [`cAdvisor`](https://github.com/google/cadvisor) is great for monitoring cgroups. 
Systemd services each run in\n   their own cgroup, so `cAdvisor` is an easy way of monitoring a service and all its child processes, together.\n\n[Photo](https://unsplash.com/photos/jJbQBP_yh68?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Javier García on [Unsplash](https://unsplash.com/search/photos/slow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[915,704],{"slug":7693,"featured":6,"template":678},"how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x","content:en-us:blog:how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x.yml","How A Fix In Go 19 Sped Up Our Gitaly Service By 30x","en-us/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x.yml","en-us/blog/how-a-fix-in-go-19-sped-up-our-gitaly-service-by-30x",{"_path":7699,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7700,"content":7706,"config":7712,"_id":7714,"_type":16,"title":7715,"_source":17,"_file":7716,"_stem":7717,"_extension":20},"/en-us/blog/a-beginners-guide-to-continuous-integration",{"title":7701,"description":7702,"ogTitle":7701,"ogDescription":7702,"noIndex":6,"ogImage":7703,"ogUrl":7704,"ogSiteName":692,"ogType":693,"canonicalUrls":7704,"schema":7705},"A beginner's guide to continuous integration","Here's how to help everyone on your team, like designers and testers, get started with GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679800/Blog/Hero%20Images/beginners-guide-to-ci.jpg","https://about.gitlab.com/blog/a-beginners-guide-to-continuous-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A beginner's guide to continuous integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Riccardo Padovani\"}],\n        \"datePublished\": \"2018-01-22\",\n      }",{"title":7701,"description":7702,"authors":7707,"heroImage":7703,"date":7709,"body":7710,"category":14,"tags":7711},[7708],"Riccardo Padovani","2018-01-22","\n\nAt [fleetster](https://www.fleetster.net/) we have our own instance of GitLab and we rely a lot on [GitLab CI/CD](/topics/ci-cd/). Our designers and QA engineers also use (and love) it, thanks to its advanced features.\n\n\u003C!-- more -->\n\nGitLab CI/CD is a very powerful system of [continuous integration (CI)](/solutions/continuous-integration/), with a lot of different features, and with every new release, new features land. It has very rich [technical documentation](https://docs.gitlab.com/ee/ci/), but it lacks a generic introduction for people who want to use it in an existing setup. A designer or a tester doesn’t need to know how to autoscale it with [Kubernetes](/blog/how-to-create-ci-cd-pipeline-with-autodeploy-to-kubernetes-using-gitlab-and-helm/) or the difference between an image and a service.\n\nBut still, they need to [know what a pipeline is](/topics/ci-cd/cicd-pipeline/), and how to see a branch deployed to an environment. 
In this article, therefore, I will try to cover as many features as possible, highlighting how end users can benefit from them. In recent months I have explained these features to several members of our team, including developers: not everyone knows what continuous integration is or has used GitLab CI/CD in a previous job.\n\nIf you want to know why continuous integration is important, I suggest reading [this article](/blog/7-reasons-why-you-should-be-using-ci/); as for the reasons to use GitLab CI/CD specifically, I leave that job to [GitLab](/solutions/continuous-integration/) itself.\n\n## Introduction\n\nEvery time developers change some code, they save their changes in a commit. They can then push that commit to GitLab, so other developers can review the code.\n\nGitLab will also start some work on that commit, if GitLab CI/CD has been configured. This work is executed by a runner. A runner is basically a server (it can be a lot of different things, even your PC, but we can simplify and call it a server) that executes the instructions listed in the `.gitlab-ci.yml` file and reports the result back to GitLab itself, which will show it in its graphical interface.\n\nWhen developers have finished implementing a new feature or a bugfix (an activity that usually requires multiple commits), they can open a merge request, where other members of the team can comment on the code and on the implementation.\n\nAs we will see, designers and testers can also (and really should!) join this process, giving feedback and suggesting improvements, especially thanks to two features of GitLab CI: environments and artifacts.\n\n## CI/CD pipelines\n\nEvery commit that is pushed to GitLab generates a pipeline attached to that commit. If multiple commits are pushed together, the pipeline will be created for the last one only. A pipeline is a collection of jobs split into different stages.\n\nAll the jobs in the same stage run concurrently (if there are enough runners), and the next stage begins only if all the jobs from the previous stage have finished successfully.\n\nAs soon as a job fails, the entire pipeline fails. There is an exception to this, as we will see below: if a job is marked as manual, its failure will not make the pipeline fail.\n\nThe stages are just a logical division between batches of jobs, where it doesn’t make sense to execute the next job if the previous one failed. We can have a `build` stage, where all the jobs to build the application are executed, and a `deploy` stage, where the built application is deployed. It doesn’t make much sense to deploy something that failed to build, does it?\n\nNo job should depend on any other job in the same stage, while jobs can rely on the results of jobs from a previous stage.\n\nLet’s see how GitLab shows information about stages and their statuses.\n\n\u003Cimg src=\"/images/blogimages/pipeline-overview.png\" alt=\"Pipeline overview\" style=\"width: 700px;\"/>{: .shadow}\n\n\u003Cimg src=\"/images/blogimages/pipeline-status.png\" alt=\"Pipeline status\" style=\"width: 700px;\"/>{: .shadow}\n\n## What is a CI job?\n\nA job is a collection of instructions that a runner has to execute. You can see in real time what the output of the job is, so developers can understand why a job fails.\n\nA job can be automatic, meaning it starts automatically when a commit is pushed, or manual, meaning a person has to trigger it. This can be useful, for example, to automate a deploy, but still deploy only when someone manually approves it, as in the sketch below. 
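\n\nTo make this concrete, here is a sketch of a tiny `.gitlab-ci.yml` with a `build` stage and a manual `deploy` job (the job names and scripts are illustrative, not taken from a real project):\n\n```yaml\nstages:\n  - build\n  - deploy\n\nbuild_app:\n  stage: build\n  script:\n    - make build\n\ndeploy_production:\n  stage: deploy\n  script:\n    - make deploy\n  when: manual   # runs only when a person triggers it from the GitLab UI\n```\n\n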
There is also a way to limit who can run a job, so that only trustworthy people can deploy, to continue the example above.\n\nA job can also build artifacts that users can download; for example, it can create an APK you can download and test on your device. This way, both designers and testers can download an application and test it without having to ask developers for help.\n\nBesides creating artifacts, a job can deploy an environment, usually reachable at a URL, where users can test the commit.\n\nJob statuses are the same as stage statuses: indeed, stages inherit their status from their jobs.\n\n\u003Cimg src=\"/images/blogimages/running-job.png\" alt=\"Running job\" style=\"width: 700px;\"/>{: .shadow}\n\n## Artifacts\n\nAs we said, a job can create an artifact that users can download to test. It can be anything, like an application for Windows, a generated image, or an APK for Android.\n\nSo you are a designer, and the merge request has been assigned to you: you need to validate the implementation of the new design!\n\nBut how to do that?\n\nYou need to open the merge request and download the artifact, as shown in the figure.\n\nEvery pipeline collects all the artifacts from all the jobs, and every job can have multiple artifacts. When you click on the download button, a dropdown will appear where you can select which artifact you want. After the review, you can leave a comment on the MR.\n\nYou can also always download the artifacts from pipelines that do not have a merge request open ;-)\n\nI am focusing on merge requests because usually that is where testers, designers, and stakeholders in general enter the workflow.\n\nBut merge requests are not tied to pipelines: while the two integrate nicely, a pipeline belongs to a commit and exists whether or not a merge request is open.\n\n\u003Cimg src=\"/images/blogimages/download-artifacts.png\" alt=\"Download artifacts\" style=\"width: 700px;\"/>{: .shadow}\n\n## CI/CD environments\n\nIn a similar way, a job can deploy something to an external server, so you can reach it through the merge request itself.\n\nAs you can see, the environment has a name and a link. Just by clicking the link, you go to a deployed version of your application (if, of course, your team has set it up correctly).\n\nYou can also click on the name of the environment, because GitLab has other cool features for environments, like [monitoring](https://gitlab.com/help/ci/environments.md).\n\n\u003Cimg src=\"/images/blogimages/environment.png\" alt=\"environment\" style=\"width: 700px;\"/>{: .shadow}\n\n## Conclusion\n\nThis was a small introduction to some of the features of GitLab CI: it is very powerful, and using it in the right way allows the whole team to use just one tool to go from planning to deploying. A lot of new features are introduced every month, so keep an eye on the [GitLab blog](/blog/).\n\nFor setting it up, or for more advanced features, take a look at the [documentation](https://docs.gitlab.com/ee/ci/).\n\nAt fleetster we use it not only for running tests, but also for automatic versioning of the software and automatic deploys to testing environments. We have automated other jobs as well (building apps and publishing them on the Play Store, and so on).\n\n\n## About the guest author\n\nRiccardo is a university student and a part-time developer at [fleetster](https://www.fleetster.net/). 
When not busy with university or work, he likes to contribute to open source projects.\n\n *[An introduction to continuous integration](https://rpadovani.com/introduction-gitlab-ci) was originally published on rpadovani.com.*\n\n*Cover photo by [Mike Tinnion](https://unsplash.com/photos/3ym6i13Y9LU?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)*\n{: .note}\n",[110,4440],{"slug":7713,"featured":6,"template":678},"a-beginners-guide-to-continuous-integration","content:en-us:blog:a-beginners-guide-to-continuous-integration.yml","A Beginners Guide To Continuous Integration","en-us/blog/a-beginners-guide-to-continuous-integration.yml","en-us/blog/a-beginners-guide-to-continuous-integration",{"_path":7719,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7720,"content":7726,"config":7731,"_id":7733,"_type":16,"title":7734,"_source":17,"_file":7735,"_stem":7736,"_extension":20},"/en-us/blog/comment-on-commits-feature-tutorial",{"title":7721,"description":7722,"ogTitle":7721,"ogDescription":7722,"noIndex":6,"ogImage":7723,"ogUrl":7724,"ogSiteName":692,"ogType":693,"canonicalUrls":7724,"schema":7725},"Demo: How to use Merge Request Commit Discussions","You can now hold discussions on specific commits within a merge request – check out how it works in this video.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680285/Blog/Hero%20Images/merge-request-commit-discussions.jpg","https://about.gitlab.com/blog/comment-on-commits-feature-tutorial","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Demo: How to use Merge Request Commit Discussions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"}],\n        \"datePublished\": \"2018-01-04\",\n      }",{"title":7721,"description":7722,"authors":7727,"heroImage":7723,"date":7728,"body":7729,"category":14,"tags":7730},[6668],"2018-01-04","\n\nIn [GitLab 10.3](/releases/2017/12/22/gitlab-10-3-released/) we released a new feature: [Merge Request Commit Discussions](/releases/2017/12/22/gitlab-10-3-released/#merge-request-commit-discussions). This is great news for teams who work at the individual commit level and want to discuss and collaborate on different commits within one merge request. Watch the video below to see this new workflow in action.\n\n\u003C!-- more -->\n\nIn short: this feature (available in both GitLab [Community and Enterprise Editions](/stages-devops-lifecycle/)) allows you to add comments to [commits within a merge request](/solutions/continuous-integration/). Before, you could only add comments to a particular version of a merge request. In the video, you'll see how, when you navigate to a specific commit, you're now taken to the \"Changes\" tab, but instead of viewing the latest version, you see the diff associated with that commit.\n\nYou can then leave a comment inline as you would usually, the difference being that your comment now starts a conversation relating specifically to that commit.\n\nIf you leave a comment on another commit, that begins a separate discussion as well. All are accessible from the \"Commits\" tab or the \"Discussions\" tab. 
You can resolve discussions as usual; resolved discussions are collapsed by default, as before.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/TviJH6oRboo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe hope teams who need a more granular approach to approving merge requests will find this useful! As usual, we welcome your feedback – be it on the [release blog post](/releases/2017/12/22/gitlab-10-3-released/) or by opening an issue.\n\n[Cover image](https://unsplash.com/photos/qm3nnbaBl_4) by [Matthew Brodeur](https://unsplash.com/@mrbrodeur) on Unsplash\n{: .note}\n",[749,1347],{"slug":7732,"featured":6,"template":678},"comment-on-commits-feature-tutorial","content:en-us:blog:comment-on-commits-feature-tutorial.yml","Comment On Commits Feature Tutorial","en-us/blog/comment-on-commits-feature-tutorial.yml","en-us/blog/comment-on-commits-feature-tutorial",{"_path":7738,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7739,"content":7744,"config":7749,"_id":7751,"_type":16,"title":7752,"_source":17,"_file":7753,"_stem":7754,"_extension":20},"/en-us/blog/conducting-remote-ux-research",{"title":7740,"description":7741,"ogTitle":7740,"ogDescription":7741,"noIndex":6,"ogImage":2284,"ogUrl":7742,"ogSiteName":692,"ogType":693,"canonicalUrls":7742,"schema":7743},"Conducting remote UX research at GitLab","Learn about the different kinds of UX research we conduct at GitLab.","https://about.gitlab.com/blog/conducting-remote-ux-research","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Conducting remote UX research at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah O’Donnell\"}],\n        \"datePublished\": \"2017-12-20\",\n      }",{"title":7740,"description":7741,"authors":7745,"heroImage":2284,"date":7746,"body":7747,"category":14,"tags":7748},[6197],"2017-12-20","GitLab is a [remote-only](http://www.remoteonly.org/) organization and just like our [team](/company/team/), our users are spread across the globe. Conducting remote UX research allows us to quickly connect with GitLab users anywhere in the world. It provides us with the opportunity to gather insight into users’ behaviors, motivations and goals when using GitLab. This helps us to determine what features should be built and how they should behave. But how do we do all this remotely?\n\n\u003C!-- more -->\n\nThese are some of the remote UX research methods we use at GitLab.\n\n## Card sorting\n\nCard sorting is a research method for discovering how people understand and categorize information. Each card represents an item or a topic, and we ask users to group the cards in a way that makes sense to them. We may also ask them to help us label these groups.\n\nCard sorting can be used to:\n\n- Help design the information architecture of your application\n- Establish what information should be on a page and in what order that information should appear\n- Provide a ranking for items or topics based on a set of criteria\n\nWhen analyzing a card sort, we look for common patterns such as which cards appear together the most and which cards are labeled in a similar way.\n\nAt GitLab, we’re currently using card sorting to restructure the sidebar navigation at a project and group level. We want to understand how you, our users, would expect our features to be grouped and classified. 
Our aim is to improve the ease and the speed at which you navigate around GitLab. We conduct remote card sorting via [Optimal Workshop](https://www.optimalworkshop.com/).\n\n## First-click testing\n\nFirst-click testing explores what users click on first when completing a task within an interface. It tells us whether users are able to find what they’re looking for quickly and easily. This research method is based on the principle that users are two to three times more likely to find what they are looking for if their initial click is correct, rather than a click in the wrong direction.\n\nWe’ve used first-click testing at GitLab to quickly evaluate multiple design ideas against one another. We share our designs with users via [UsabilityHub](https://usabilityhub.com/). We measure whether users take the correct path and how long it takes them to decide where to click. A slower click time would suggest a user has hesitated about where to click.\n\nFirst-click testing is great for providing an indication of whether a design is intuitive to users and helps us to quickly narrow down multiple design concepts.\n\n## Surveys\n\nSurveys are used to investigate the opinions or experiences of users by asking them questions through an online form. A survey invites people to share open and honest feedback. Some people find them less intimidating than other forms of research as there is the option to remain anonymous when providing answers. They also allow us to track how the attitudes and behaviors of our users change over time.\n\nWe’ve used surveys to understand our users and form [personas](https://design.gitlab.com/), to generate new ideas for future GitLab improvements and to help measure users’ satisfaction with our existing features.\n\n## User interviews\n\nIf you take part in a user interview at GitLab, you’ll usually be speaking one on one with a UX researcher. In order to do this, you’ll need a desktop or laptop computer and a headset with a microphone.\n\nWe find that most of our users like to talk with us on their lunch break at their work station, whether situated at home or in an office. We love this, as it provides some insight into the environment in which you use GitLab.\n\nOften our interviews are focused on you! We’ll ask you to chat about things such as your background, occupation and experience with GitLab. Sometimes we might have a particular topic we’d like to discuss, such as how you’ve incorporated GitLab into your workflow. We’ll always tell you our intentions ahead of the call so you have time to think about what you’d like to contribute to the discussion. We also welcome you to share your screen with us during the call. We understand that it is sometimes easier to show and demonstrate something than it is to just talk about it!\n\nWe’ve used feedback from user interviews to:\n\n- Inform our [personas](https://docs.gitlab.com/ee/development/ux_guide/users.html)\n- Follow up on survey answers\n- Understand and develop objectives and goals for features\n\n## Usability testing\n\nUsability testing is a technique used to evaluate a product by testing it with representative users. Usability testing can be divided into two categories: moderated and unmoderated research.\n\n**Moderated**\n\nIf you participate in moderated usability testing at GitLab, you’ll complete a series of tasks whilst being observed by one of our UX researchers. In order to see what you're doing, we'll ask you to share your screen with us. 
We use [Zoom](https://zoom.us/) to run our moderated usability testing sessions.\n\nAs you use GitLab, we’ll ask you to try and think out loud: tell us what you’re looking at, what you’re trying to do and what you’re thinking. We’re interested in hearing your honest feedback. Sound scary? It really isn’t! It’s important to remember that we’re testing GitLab, not you. You can’t say or do anything wrong during a study.\n\nModerated research allows for conversation between a user and the UX researcher, because both are online simultaneously. It gives the researcher the opportunity to ask a user follow-up questions regarding something they’ve said or done. Consequently, moderated research provides us with a lot of in-depth qualitative data about our users’ needs. It can help us to uncover usability problems that we weren’t aware of and to generate solutions to these problems.\n\n**Unmoderated**\n\nUnlike moderated research, unmoderated research doesn't involve any conversation between a user and a UX researcher. Instead, unmoderated usability testing sessions are completed alone by a user. As users can complete sessions at their own convenience and studies can be run simultaneously, they're good for collecting data quickly.\n\nWe use [Validately](https://validately.com/) to serve the tasks to you and to record your actions. We then analyze the collected data asynchronously. It is, however, still very helpful to us if you try and think out loud while you’re completing tasks.\n\nUnmoderated research can provide some qualitative data. However, as there's no opportunity to ask users follow-up questions related to their actions, the study should focus on a few specific elements or relatively minor changes. Unmoderated research is usually better at addressing specific quantitative questions, such as:\n\n- What percentage of users successfully completed the task?\n- How long did it take users to complete the task?\n\nAs a researcher cannot view an unmoderated usability testing session until it's completed, there's a risk of a study being unusable if the user didn't complete the tasks as specified or if they ran into technical difficulties.\n\nWe conduct both moderated and unmoderated usability testing sessions at GitLab to test new features and changes to existing features.\n\n## How can I get involved?\n\nWe’re always looking for people to participate in our research, whether you're a GitLab user or not. You can get involved by signing up for [GitLab First Look](/community/gitlab-first-look/), a comprehensive research program that will help us ship the features and fixes you need to do your best work.  
Besides being instrumental in shaping the future of GitLab, you’ll have the opportunity to earn gift cards and win awesome tech prizes by sharing your feedback with us.\n",[2409,915,1144],{"slug":7750,"featured":6,"template":678},"conducting-remote-ux-research","content:en-us:blog:conducting-remote-ux-research.yml","Conducting Remote Ux Research","en-us/blog/conducting-remote-ux-research.yml","en-us/blog/conducting-remote-ux-research",{"_path":7756,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7757,"content":7763,"config":7769,"_id":7771,"_type":16,"title":7772,"_source":17,"_file":7773,"_stem":7774,"_extension":20},"/en-us/blog/moving-to-headless-chrome",{"title":7758,"description":7759,"ogTitle":7758,"ogDescription":7759,"noIndex":6,"ogImage":7760,"ogUrl":7761,"ogSiteName":692,"ogType":693,"canonicalUrls":7761,"schema":7762},"How GitLab switched to Headless Chrome for testing","A detailed explanation with examples of how GitLab made the switch to headless Chrome.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680270/Blog/Hero%20Images/headless-chrome-cover.jpg","https://about.gitlab.com/blog/moving-to-headless-chrome","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab switched to Headless Chrome for testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Greiling\"}],\n        \"datePublished\": \"2017-12-19\",\n      }",{"title":7758,"description":7759,"authors":7764,"heroImage":7760,"date":7766,"body":7767,"category":14,"tags":7768},[7765],"Mike Greiling","2017-12-19","\n\nGitLab recently switched from PhantomJS to headless Chrome for both our\nfrontend tests and our RSpec feature tests. In this post we will detail the\nreasons we made this transition, the challenges we faced, and the solutions we\ndeveloped. We hope this will benefit others making the switch.\n\n\u003C!-- more -->\n\nWe now have a truly accurate way to test GitLab within a real, modern browser.\nThe switch has improved our ability to write tests and debug them while running\nthem directly in Chrome. Plus the change forced us to confront and clean up a\nnumber of hacks we had been using in our tests.\n\n## Switching to headless Chrome from PhantomJS: background\n\n[PhantomJS](http://phantomjs.org) has been a part of GitLab's test framework\n[for almost five years](https://gitlab.com/gitlab-org/gitlab-ce/commit/ba25b2dc84cc25e66d6fa1450fee39c9bac002c5).\nIt has been an immensely useful tool for running browser integration tests in a\nheadless environment at a time when few options were available. However, it\nhad some shortcomings:\n\nThe most recent version of PhantomJS (v2.1.1) is compiled with a three-year-old\nversion of [QtWebKit](https://trac.webkit.org/wiki/QtWebKit) (a fork of WebKit\nv538.1 according to the user-agent string). This puts it on par with something\nlike Safari 7 on macOS 10.9. It resembles a real modern browser, but it's not\nquite there. It has a different JavaScript engine, an older rendering engine,\nand a host of missing features and quirks.\n\nAt this time, GitLab supports [the current and previous major\nrelease](https://docs.gitlab.com/ee/install/requirements.html#supported-web-browsers) of\nFirefox, Chrome, Safari, and Microsoft Edge/IE. This puts PhantomJS and its\ncapabilities somewhere near or below our lowest common denominator. 
Many modern\nbrowser features either [do not work](http://phantomjs.org/supported-web-standards.html),\nor [require vendor prefixes](http://phantomjs.org/tips-and-tricks.html) and\npolyfills that none of our supported browsers require. We could selectively\nadd these polyfills, prefixes, and other workarounds just within our test\nenvironment, but doing so would increase technical debt, cause confusion, and\nmake the tests less representative of a true production environment. In most\ncases we had opted to simply omit them or hack around them (more on this\n[later](#trigger-method)).\n\nHere's a screenshot of the way PhantomJS renders a page from GitLab, followed\nby the same page rendered in Google Chrome:\n\n![Page Rendered by PhantomJS](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/render-phantomjs.png){: .shadow.center}\n\n![Page Rendered by Google Chrome](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/render-chrome.png){: .shadow.center}\n\nYou can see in PhantomJS the filter tabs are rendered horizontally, the icons\nin the sidebar render on their own lines, the global search field is\noverflowing off the navbar, etc.\n\nWhile it looks ugly, in most cases we could still use this to run functional\ntests, so long as elements of the page remain visible and clickable, but this\ndisparity with the way GitLab rendered in a real browser did introduce several\nedge cases.\n\n## What is headless Chrome\n\nIn April of this year, [news spread](https://news.ycombinator.com/item?id=14101233)\nthat Chrome 59 would support a [native, cross-platform headless\nmode](https://www.chromestatus.com/features/5678767817097216). It was\npreviously possible to simulate a headless Chrome browser in CI/CD [using\nvirtual frame buffer](https://gist.github.com/addyosmani/5336747), but this\nrequired a lot of memory and extra complexities. A native headless mode is a\ngame changer. It is now possible to run integration tests in a headless\nenvironment on a real, modern web browser that our users actually use!\n\nSoon after this was revealed, Vitaly Slobodin, PhantomJS's chief developer,\nannounced that the project [would no longer be\nmaintained](https://github.com/ariya/phantomjs/issues/15105#issuecomment-322850178):\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-cards=\"hidden\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">This is the end - \u003Ca href=\"https://t.co/GVmimAyRB5\">https://t.co/GVmimAyRB5\u003C/a>\u003Ca href=\"https://twitter.com/hashtag/phantomjs?src=hash&amp;ref_src=twsrc%5Etfw\">#phantomjs\u003C/a> 2.5 will not be released. 
Sorry, guys!\u003C/p>&mdash; Vitaly Slobodin (@Vitalliumm) \u003Ca href=\"https://twitter.com/Vitalliumm/status/852450027318464513?ref_src=twsrc%5Etfw\">April 13, 2017\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nIt became clear that we would need to make the transition away from PhantomJS at\nsome point, so we [opened up an issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/30876),\ndownloaded the Chrome 59 beta, and started looking at options.\n\n### Frontend tests (Karma)\n\nOur frontend test suite utilizes the [Karma](http://karma-runner.github.io/)\ntest runner, and updating this to work with Google Chrome was surprisingly\nsimple ([here's the merge request](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12036)).\nThe [karma-chrome-launcher](https://github.com/karma-runner/karma-chrome-launcher)\nplugin was very quickly updated to support headless mode starting from\n[version 2.1.0](https://github.com/karma-runner/karma-chrome-launcher/releases/tag/v2.1.0),\nand it was essentially a drop-in replacement for the PhantomJS launcher. Once\nwe [re-built our CI/CD build images](https://gitlab.com/gitlab-org/gitlab-build-images/merge_requests/41)\nto include Google Chrome 59 (and fiddled around with some pesky timeout\nsettings), it worked!  We were also able to remove some rather ugly\nPhantomJS-specific hacks that Jasmine required to spy on some built-in browser\nfunctions.\n\n### Backend feature tests (RSpec + Capybara)\n\nOur feature tests use RSpec and [Capybara](https://github.com/teamcapybara/capybara)\nto perform full end-to-end integration testing of database, backend, and\nfrontend interactions. Before switching to headless Chrome, we had used\n[Poltergeist](https://github.com/teampoltergeist/poltergeist) which is a\nPhantomJS driver for Capybara. It would spin up a PhantomJS browser instance\nand direct it to browse, fill out forms, and click around on pages to verify\nthat everything behaved as it should.\n\nSwitching from PhantomJS to Google Chrome required a change in drivers from\nPoltergeist to Selenium and [ChromeDriver](https://sites.google.com/a/chromium.org/chromedriver/).\nSetting this up was pretty straightforward. You can install ChromeDriver on\nmacOS with `brew install chromedriver` and the process is similar on any given\npackage manager in Linux. After this we added the `selenium-webdriver` gem to\nour test dependencies and configured Capybara like so:\n\n```ruby\nrequire 'selenium-webdriver'\n\nCapybara.register_driver :chrome do |app|\n  options = Selenium::WebDriver::Chrome::Options.new(\n    args: %w[headless disable-gpu no-sandbox]\n  )\n  Capybara::Selenium::Driver.new(app, browser: :chrome, options: options)\nend\n\nCapybara.javascript_driver = :chrome\n```\n\nGoogle says the [`disable-gpu` option is necessary for the time\nbeing](https://developers.google.com/web/updates/2017/04/headless-chrome#cli)\nuntil some bugs are resolved. The `no-sandbox` option also appears to be\nnecessary to get Chrome running inside a Docker container for [GitLab's CI/CD\nenvironment](/topics/ci-cd/). Google provides a [useful guide for working with headless Chrome\nand Selenium](https://developers.google.com/web/updates/2017/04/headless-chrome).\n\nIn our final implementation we changed this to conditionally add the `headless`\noption unless you have `CHROME_HEADLESS=false` in your environment. 
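\n\nConceptually, the registration then ends up looking something like this (a simplified sketch, not our exact code):\n\n```ruby\nrequire 'selenium-webdriver'\n\nargs = %w[disable-gpu no-sandbox]\n# Run headless unless explicitly disabled in the environment.\nargs.push('headless') unless ENV['CHROME_HEADLESS'] == 'false'\n\nCapybara.register_driver :chrome do |app|\n  options = Selenium::WebDriver::Chrome::Options.new(args: args)\n  Capybara::Selenium::Driver.new(app, browser: :chrome, options: options)\nend\n```\n\n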
This makes\nit easy to disable headless mode while debugging or writing tests. It's also\npretty fun to watch tests execute on the browser window in real time:\n\n```shell\nexport CHROME_HEADLESS=false\nbundle exec rspec spec/features/merge_requests/filter_merge_requests_spec.rb\n```\n\n![Tests Executing in Chrome](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/headlessless-chrome-tests.gif){: .shadow.center}\n\n### What are the differences between Poltergeist and Selenium?\n\nThe process of switching drivers here was not nearly as straightforward as\nit was with the frontend test suite. Dozens of tests started failing as soon\nas we changed our Capybara configuration, and this was due to some major\ndifferences in the way Selenium/ChromeDriver implemented Capybara's driver API\ncompared to Poltergeist/PhantomJS. Here are some of the challenges we ran into:\n\n1.  **JavaScript modals are no longer accepted automatically**\n\n    We often use JavaScript `confirm(\"Are you sure you want to do X?\");` click\n    events when performing a destructive action such as deleting a branch or\n    removing a user from a group. Under Poltergeist a `.click` action would\n    automatically accept modals like `alert()` and `confirm()`, but under\n    Selenium, you now need to wrap these with one of `accept_alert`,\n    `accept_confirm`, or `dismiss_confirm`, e.g.:\n\n    ```ruby\n    # Before\n    page.within('.some-selector') do\n      click_link 'Delete'\n    end\n\n    # After\n    page.within('.some-selector') do\n      accept_confirm { click_link 'Delete' }\n    end\n    ```\n\n1.  **Selenium `Element.visible?` returns false for empty elements**\n\n    If you have an empty `div` or `span` that you want to access in your test,\n    Selenium does not consider these \"visible.\" This is not much of an issue\n    unless you set `Capybara.ignore_hidden_elements = true` as we do in our\n    feature tests. Where `find('.empty-div')` would have worked fine in\n    Poltergeist, we now need to use `visible: :any` to\n    select such elements.\n\n    ```ruby\n    # Before\n    find('.empty-div')\n\n    # After\n    find('.empty-div', visible: :any)\n    # or\n    find('.empty-div', visible: false)\n    ```\n\n    More on [Capybara and hidden elements](https://makandracards.com/makandra/7617-change-how-capybara-sees-or-ignores-hidden-elements).\n\n1.  {:#trigger-method} **Poltergeist's `Element.trigger('click')` method does not exist in Selenium**\n\n    In Capybara, when you use `find('.some-selector').click`, the element you\n    are clicking must be both visible and unobscured by any overlapping\n    element. Situations where links could not be clicked would sometimes occur\n    with Poltergeist/PhantomJS due to its poor support for CSS without vendor\n    prefixes. Here's one example:\n\n    ![Overlapping elements](https://about.gitlab.com/images/blogimages/moving-to-headless-chrome/overlapping-element.png){: .shadow.center}\n\n    The broken layout of the search form here was actually placing an invisible\n    element over the top of the \"Update all\" button, making it unclickable.\n    Poltergeist offers a `.trigger('click')` method to work around this.\n    Rather than actually clicking the element, this method would trigger a DOM\n    event to simulate a click. Utilizing this method was a bad practice, but\n    we ran into similar issues so often that many developers formed a habit\n    of using it everywhere. This began to lead to some lazy and sloppy test\nwriting. 
For instance, someone might use `.trigger` as a shortcut to click\n    on a link that was obscured behind an open dropdown menu, when a properly\n    written test should `.click` somewhere to close the dropdown, and _then_\n    `.click` on the item behind it.\n\n    Selenium does not support the `.trigger` method. Now that we were using a\n    more accurate rendering engine that won't break our layouts, many of these\n    instances could be resolved by simply replacing `.trigger('click')` with\n    `.click`, but due to some of the bad practice uses mentioned above, this\n    didn't always work.\n\n    There are of course some ways to hack a `.trigger` replacement. You could\n    simulate a click by focusing on an element and hitting the \"return\" key,\n    or use JavaScript to trigger a click event, but in most cases we decided to\n    take the time and actually correct these poorly implemented tests so that a\n    normal `.click` could again be used. After all, if our tests are meant to\n    simulate a real user interacting with the page, we should limit ourselves\n    to the actions a real user would be expected to use.\n\n    ```ruby\n    # Before\n    find('.obscured-link').trigger('click')\n\n    # After\n\n    # bad\n    find('.obscured-link').send_keys(:return)\n\n    # bad\n    execute_script(\"document.querySelector('.obscured-link').click();\")\n\n    # good\n    # do something to make link accessible, then\n    find('.link').click\n    ```\n\n1.  **`Element.send_keys` only works on focus-able elements**\n\n    We had a few places in our code where we would test out our keyboard\n    shortcuts using something like `find('.boards-list').native.send_keys('i')`.\n    It turns out Chrome will not allow you to `send_keys` to any element that\n    cannot be \"focused\", e.g. links, form elements, the document body, or\n    presumably anything with a tab index.\n\n    In all of the cases where we were doing this, triggering `send_keys` on the\n    body element would work since that's ultimately where our event handler was\n    listening anyway:\n\n    ```ruby\n    # Before\n    find('.some-div').native.send_keys('i')\n\n    # After\n    find('body').native.send_keys('i')\n    ```\n\n1.  **`Element.send_keys` does not support non-BMP characters (like emoji)**\n\n    In a few tests, we needed to fill out forms with emoji characters. With\n    Poltergeist we would do this like so:\n\n    ```ruby\n    # Before\n    find('#note-body').native.send_keys('@💃username💃')\n    ```\n\n    In Selenium we would get the following error message:\n\n    ```\n    Selenium::WebDriver::Error::UnknownError:\n        unknown error: ChromeDriver only supports characters in the BMP\n    ```\n\n    To work around this, we added [a JavaScript method to our test bundle that\n    would simulate input and fire off the same DOM events](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/app/assets/javascripts/test_utils/simulate_input.js)\n    that an actual keyboard input would generate on every keystroke, then\n    wrapped this with a [Ruby helper](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/spec/support/input_helper.rb)\n    method that could be called like so:\n\n    ```ruby\n    # After\n    include InputHelper\n\n    simulate_input('#note-body', \"@💃username💃\")\n    ```\n\n1.  **Setting cookies is much more complicated**\n\n    It's quite common to want to set some cookies before `visit`ing a page that\n    you intend to test, whether it's to mock a user session or toggle a\n    setting. 
With Poltergeist, this process is really simple. You can use\n    `page.driver.set_cookie`, provide a simple key/value pair, and it will just\n    work as expected, setting a cookie with the correct domain and scope.\n\n    Selenium is quite a bit more strict. The method is now\n    `page.driver.browser.manage.add_cookie`, and it comes with two caveats:\n\n    - You cannot set cookies until you `visit` a page in the domain you intend\n      to scope your cookies to.\n    - Annoyingly, you cannot alter the `path` parameter (or at least we could\n      never get this to work), so it is best to set cookies at the root path.\n\n    Before you `visit` your page, Chrome's URL is technically sitting at\n    something like `about:blank`. When you attempt to set a cookie there, it\n    will refuse because there is no hostname, and you cannot coerce one by\n    providing a domain as an argument. The [Selenium\n    documentation](http://docs.seleniumhq.org/docs/03_webdriver.jsp#cookies)\n    suggests that you do the following:\n\n    > If you are trying to preset cookies before you start interacting with a\n    > site and your homepage is large / takes a while to load, an alternative is\n    > to find a smaller page on the site (typically the 404 page is small, e.g.\n    > `http://example.com/some404page`).\n\n    ```ruby\n    # Before\n    before do\n      page.driver.set_cookie('name', 'value')\n    end\n\n    # After\n    before do\n      visit '/some-root-path'\n      page.driver.browser.manage.add_cookie(name: 'name', value: 'value')\n    end\n    ```\n\n1.  **Page request/response inspection methods are missing**\n\n    Poltergeist very conveniently implemented methods like `page.status_code`\n    and `page.response_headers`, which are also present in Capybara's default\n    `RackTest` driver, making it easy to inspect the raw response from the\n    server, in addition to the way that response is rendered by the browser. It\n    also allowed you to inject headers into the requests made to the server,\n    e.g.:\n\n    ```ruby\n    # Before\n    before do\n      page.driver.add_header('Accept', '*/*')\n    end\n\n    it 'returns a 404 page' do\n      visit some_path\n\n      expect(page.status_code).to eq(404)\n      expect(page).to have_css('.some-selector')\n    end\n    ```\n\n    Selenium does not implement these methods, and [the authors do not intend\n    to add support for them](https://github.com/seleniumhq/selenium-google-code-issue-archive/issues/141#issuecomment-191404986),\n    so we needed to develop a workaround. Several people have suggested running\n    a proxy alongside ChromeDriver that would intercept all traffic to and from\n    the server, but this seemed to us like overkill. Instead, we opted to\n    create a [lightweight Rack middleware](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/lib/gitlab/testing/request_inspector_middleware.rb)\n    and a corresponding [helper class](https://gitlab.com/gitlab-org/gitlab-ce/blob/a8b9852837/spec/support/inspect_requests.rb)\n    that would intercept the traffic for inspection. This is similar to our\n    [RequestBlockerMiddleware](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/lib/gitlab/testing/request_blocker_middleware.rb)\n    that we were already using to intelligently `wait_for_requests` to complete\n    within our tests. 
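\n\n    To make the idea concrete, here is a greatly simplified sketch of what\n    such a request-logging middleware could look like. This is our own\n    illustration rather than the actual implementation linked above, and the\n    names are only placeholders:\n\n    ```ruby\n    # Simplified, hypothetical request-inspecting Rack middleware. The real\n    # implementation also handles header injection and thread safety.\n    class RequestInspectorMiddleware\n      class \u003C\u003C self\n        # Collected request log and an on/off switch for inspection.\n        attr_accessor :requests, :inspecting\n      end\n      self.requests = []\n      self.inspecting = false\n\n      def initialize(app)\n        @app = app\n      end\n\n      def call(env)\n        status, headers, body = @app.call(env)\n        if self.class.inspecting\n          self.class.requests \u003C\u003C { path: env['PATH_INFO'], status_code: status, headers: headers }\n        end\n        [status, headers, body]\n      end\n    end\n    ```\n\n    A helper like `inspect_requests` can then flip `inspecting` on around a\n    block and hand the collected array back to the test.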
It works like this:\n\n    ```ruby\n    # After\n    it 'returns a 404 page' do\n      requests = inspect_requests do\n        visit some_path\n      end\n\n      expect(requests.first.status_code).to eq(404)\n      expect(page).to have_css('.some-selector')\n    end\n    ```\n\n    Within the `inspect_requests` block, the Rack middleware will log all\n    requests and responses, and return them as an array for inspection. This\n    will include the page being `visit`ed as well as the subsequent XHR and\n    asset requests, but the initial path request will be the first in the array.\n\n    You can also inject headers using the same helper like so:\n\n    ```ruby\n    # After\n    inspect_requests(inject_headers: { 'Accept' => '*/*' }) do\n      visit some_path\n    end\n    ```\n\n    This middleware should be injected early in the stack to ensure any other\n    middleware that might intercept or modify the request/response will be\n    seen by our tests. We include this line in our test environment config:\n\n    ```ruby\n    config.middleware.insert_before('ActionDispatch::Static', 'Gitlab::Testing::RequestInspectorMiddleware')\n    ```\n\n1.  **Browser console output is no longer printed to the terminal**\n\n    Poltergeist would automatically output any `console` messages directly into\n    the terminal in real time as tests were run. If you had a bug in the frontend\n    code that caused a test to fail, this feature would make debugging much\n    easier as you could inspect the terminal output of the test for an error\n    message or a stack trace, or inject your own `console.log()` into the\n    JavaScript to see what is going on. With Selenium this is sadly no longer the\n    case.\n\n    You can, however, collect browser logs by configuring Capybara like so:\n\n    ```ruby\n    capabilities = Selenium::WebDriver::Remote::Capabilities.chrome(\n      loggingPrefs: {\n        browser: \"ALL\",\n        client: \"ALL\",\n        driver: \"ALL\",\n        server: \"ALL\"\n      }\n    )\n\n    # ...\n\n    Capybara::Selenium::Driver.new(\n      app,\n      browser: :chrome,\n      desired_capabilities: capabilities,\n      options: options\n    )\n    ```\n\n    This will allow you to access logs with the following, e.g. in the event of\n    a test failure:\n\n    ```ruby\n    page.driver.manage.get_log(:browser)\n    ```\n\n    This is far more cumbersome than it was in Poltergeist, but it's the best\n    method we've found so far. Thanks to [Larry Reid's blog post](http://technopragmatica.blogspot.com/2017/10/switching-to-headless-chrome-for-rails_31.html)\n    for the tip!\n
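\n    To put the tip to use, you could dump console output whenever a feature\n    spec fails. A minimal sketch (our own illustration, not from the original\n    merge request; it assumes the Selenium Ruby bindings' `logs.get` API):\n\n    ```ruby\n    # Hypothetical RSpec hook: print the browser console log for failed examples.\n    RSpec.configure do |config|\n      config.after(:each, type: :feature) do |example|\n        if example.exception\n          page.driver.browser.manage.logs.get(:browser).each do |entry|\n            puts \"[browser] #{entry.level}: #{entry.message}\"\n          end\n        end\n      end\n    end\n    ```\n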
\n## Results\n\nRegarding performance, we attempted to quantify the change with a\nnon-scientific analysis of 10 full-suite RSpec test runs _before_ this change,\nand 10 more runs from _after_ this change, factoring out any tests that were\nadded or removed between these pipelines. The end result was:\n\n**Before:** 5h 18m 52s\n**After:** 5h 12m 34s\n\nA savings of about six minutes, or roughly 2 percent of the total compute time, is\nstatistically insignificant, so I'm not going to claim we improved our test\nspeed with this change.\n\nWhat we did improve was test accuracy, and we vastly improved the tools at our\ndisposal to write and debug tests. Now, all of the Capybara screenshots\ngenerated when a CI/CD job fails look exactly as they do on your own browser\nrather than resembling the broken PhantomJS screenshot above. Inspecting a\nfailing test locally can now be done interactively by turning off headless\nmode, dropping a `byebug` line into the spec file, and watching the browser\nwindow as you type commands into the prompt. This technique proved extremely\nuseful while working on this project.\n\nYou can find all of the changes we made in [the original merge request page\non GitLab.com](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12244).\n\n## What are some additional uses for headless Chrome?\n\nWe have also been utilizing headless Chrome to analyze frontend performance, and have found it to be useful in detecting issues.\n\nWe'd like to make it easier for other companies to embrace it as well, so as part of the upcoming 10.3 release of GitLab we are releasing [Browser Performance Testing](https://docs.gitlab.com/ee/user/project/merge_requests/browser_performance_testing.html). Leveraging [GitLab CI/CD](/solutions/continuous-integration/), headless Chrome is launched against a set of pages and an overall performance score is calculated. Then for each merge request the scores are compared between the source and target branches, making it easier to detect performance regressions prior to merge.\n\n## Acknowledgements\n\nI sincerely hope this information will prove useful to anybody else looking to\nmake the switch from PhantomJS to headless Chrome for their Rails application.\n\nThanks to the Google team for their very helpful documentation, thanks to the\nmany bloggers out there who shared their own experiences with hacking headless\nChrome in the early days of its availability, and special thanks to Vitaly\nSlobodin and the rest of the contributors to PhantomJS who provided us with an\nextremely useful tool that served us for many years. 🙇‍\n\n\u003Cstyle>\n\n.center {\n  text-align: center;\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n\ncode, kbd {\n  font-size: 80%;\n}\n\n\u003C/style>\n",[915,1979,1328],{"slug":7770,"featured":6,"template":678},"moving-to-headless-chrome","content:en-us:blog:moving-to-headless-chrome.yml","Moving To Headless Chrome","en-us/blog/moving-to-headless-chrome.yml","en-us/blog/moving-to-headless-chrome",{"_path":7776,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7777,"content":7782,"config":7788,"_id":7790,"_type":16,"title":7791,"_source":17,"_file":7792,"_stem":7793,"_extension":20},"/en-us/blog/gitlab-design-library",{"title":7778,"description":7779,"ogTitle":7778,"ogDescription":7779,"noIndex":6,"ogImage":2284,"ogUrl":7780,"ogSiteName":692,"ogType":693,"canonicalUrls":7780,"schema":7781},"Scaling design: The start of system thinking","How we began the process of introducing a design system to GitLab.","https://about.gitlab.com/blog/gitlab-design-library","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Scaling design: The start of system thinking\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taurie Davis\"}],\n        \"datePublished\": \"2017-12-12\",\n      }",{"title":7778,"description":7779,"authors":7783,"heroImage":2284,"date":7785,"body":7786,"category":14,"tags":7787},[7784],"Taurie Davis","2017-12-12","\n\nScaling design within an application is a struggle. Design systems help alleviate problems that arise with scaling by making it easier to find inconsistent interactions or conflicting messaging. However, it can be extremely difficult to introduce a new system to teams that are already functioning without one. 
Here's how we got started.\n\n\u003C!-- more -->\n\nWe took the initial step towards establishing our own system by creating a pattern library of reusable components that can be shared and reused across the application.\n\n## Design as a language\n\nConsistency within the UI and increased iteration speed are clear benefits of using a design library. This helps keep the application [DRY](http://programmer.97things.oreilly.com/wiki/index.php/Don't_Repeat_Yourself) and allows designers to focus their efforts on solving user needs, rather than recreating elements and reinventing solutions. In an effort to create a library that is understood by multiple teams, it's important to begin thinking about design as a language.\n\nYour design language is an integral part of a design system that clearly defines the semantics of your visual designs and allows your team to thoroughly document guidelines. It's important that the team not only understands how the system is built, but also the reasoning behind the choices made. This will ultimately help enable your team to build a library of components that support the semantics you have established.\n\n## Getting started\n\nKnowing where to start can be daunting. We began by first understanding the current state of our application. By auditing current designs that were implemented, we found numerous inconsistencies across our interface and determined that we lacked a solid design language to build from. A search within our variables revealed that we had **82 different gray values defined within the UI**. We also had an undefined type scale that included **at least 30 different values** in pixels, rems, and percentages.\n\nBy understanding the problems our current system had, we were able to start building a solid foundation to work from. We defined and documented our perceptual patterns, which included styles that aid in the aesthetic of the brand: typography, icons, colors, and a measurement system.\n\n{: .text-center}\n![Library foundation](https://about.gitlab.com/images/blogimages/gitlab-design-library/library--styles@2x.png){: .shadow}\n\nOnce our perceptual patterns were defined, we started applying them to our components. We took a couple of core pieces of our application and mocked them up using our new guidelines to ensure that our new rules were not too rigid and would be flexible enough to still encourage the creation of new ideas and methods while designing new components.\n\nOnce we nailed down our styles, we were able to start identifying functional patterns that needed to be built out using our new guidelines. Functional patterns include global modules that can be reused throughout your application, such as buttons, dropdowns, and tabs.\n\nThere were a few instances where our newly defined styles did not work well in our actual designs. For example, we determined that our 8px measurement system was too strict for right and left padding on horizontal tabs, buttons, and inputs. Although it was not a part of our measurement system, we decided as a team to create a new rule that would allow for a 12px measure in order to better align stacked items while giving elements enough room to breathe.\n\n{: .text-center}\n![Library foundation](https://about.gitlab.com/images/blogimages/gitlab-design-library/library--measures@2x.png){: .shadow}\n\nBuilding out these components gave us the opportunity to alter and add to our new perceptual patterns. 
It is okay to allow some flexibility within your design library, so long as the rules and use cases are clearly defined.\n\n## Structure\n\nWe set up our design library using a [primary sketch file](https://gitlab.com/gitlab-org/gitlab-design/blob/master/production/resources/gitlab-elements.sketch) that includes all the components and styles that have been added to our team library. As we began building out multiple components, it was important to define a structure that would mimic the way components are implemented on the frontend. This would allow the design and frontend teams to work more closely together, ensuring that components were DRY and reusable. We chose to implement [Brad Frost's Atomic Design](http://bradfrost.com/blog/post/atomic-web-design/) principles in order to accomplish this. Atomic design \"break[s] entire interfaces down into fundamental building blocks,\" ensuring that everything is constructed in a methodical way. These building blocks consist of:\n\n**Atoms:** Elements that cannot be broken down further. This can include type styles, buttons, labels, and inputs\n\n**Molecules:** A group of atoms that function as a unit, such as a form.\n\n**Organisms:** A high-level component that consists of several molecules to make up its own structure. This can include a header or a sidebar.\n\nThere has been a lot written on Atomic Design. To learn more I recommend:\n\n- [Atomic Design by Brad Frost](http://atomicdesign.bradfrost.com/)\n- [Atomic Design by Brad Frost - An Event Apart video](https://vimeo.com/179245570)\n- [Pattern Lab](http://patternlab.io/)\n\nFollowing this structure forces the team to think carefully about what each part of a design is made up of, as well as easily define global components. If a modifier consists of atoms that are not used elsewhere, we encourage designers to think about whether a specific atom is necessary for that paradigm or if an existing global component would work in its place.\n\nIn the following example, we've built out our left navigational sidebar. This organism comprises molecules, and these molecules comprise globally used atoms (an avatar, badge, typography, and icons). We also include molecule modifiers, which make it easy to see the different states that a molecule can have. These together build the basis of the sidebar.\n\n{: .text-center}\n![Library foundation](https://about.gitlab.com/images/blogimages/gitlab-design-library/library--atomic@2x.png){: .shadow}\n\nWe use [symbols within Sketch](https://sketchapp.com/docs/symbols/) to create our atoms and molecules, while leaving organisms as groups so that we can easily modify and override specific aspects to fit the design we are working on.\n\n## Tooling\n\nChoosing tools can be an arduous task, especially with the number of options available for designers today. It is easy to get caught up in the latest tool and turn progress into tool churn. At GitLab, we took the time to evaluate multiple tools that would assist in the creation of a team library.\n\nSome of the issues we ran into while evaluating plugins were:\n\n- Slow performance, as well as bugs, when adding, changing, and renaming components\n- Overriding options when adding symbols to a new document were not pulled in or included automatically\n- Text styles weren't being saved or included in symbols that were pulled into a new document\n\nWe eventually decided to move forward using [Brand.ai](https://brand.ai) as a plugin for Sketch. This plugin solved many of the issues we were running into with other tools. 
However, while this plugin was the best that we found at the time, no tool is perfect:\n\n- Brand.ai limits the organization of components to one level deep\n- While faster and less buggy than other plugins, Brand.ai is still not as fast as we would like :rocket:\n\n{: .text-center}\n![Library foundation](https://about.gitlab.com/images/blogimages/gitlab-design-library/library--brandai@2x.png){: .shadow}\n\nAt GitLab, we don't look at Brand.ai as the answer. It is solely a tool to aid us in the creation process. Since deciding on using Brand.ai, Sketch has released their own [library feature](https://blog.sketchapp.com/libraries-an-in-depth-look-56b147022e1f), Brand.ai was [acquired by InVision](https://www.invisionapp.com/blog/announcing-invision-design-system-manager/), and Figma has added numerous new features to aid in the creation of a design library. Tools are constantly evolving, but it's important to keep in mind that frequently switching tools may slow progress. Evaluate your tools carefully and decide what is best for your team at this moment. Remember that pattern libraries are only one aspect of a design system that helps make it more effective. The tools and technologies you use to create the library are meant to help your team, not act as the solution.\n\n## Moving forward\n\nConversations around design systems have exploded in recent years. Just over the last few months, Figma has begun sponsoring [Design System Dinners](https://www.designsystems.com/), InVision has created a [Design Systems Handbook](https://www.designbetter.co/design-systems-handbook/introducing-design-systems), and Smashing Magazine released [*Design Systems*](https://www.smashingmagazine.com/design-systems-book/) as their newest book.\n\nAt GitLab, we have only just begun the work on our design system. A design library is only the first part of our overall goal and it is our first step towards ensuring that our design will scale within the growing organization. We have begun thinking about design with a system in mind by creating a design language that captures the visual styles of our brand, as well as creating reusable and robust components. We've chosen tools and technologies that aid us in this process while remembering that they are always evolving and are not the system itself.\n\nBeyond continuing to build out new paradigms within our design library, our next step is to begin linking our design library with our frontend code. This will allow us to include not only our designs and documentation, but also code snippets that can be used and referenced in our application. 
We have only just started this process and are in the very early stages of setting up a [repository](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com) to showcase our system.\n\nIf you have any tips, tricks, or lessons that you discovered while building out your own design library or system, we would love to hear from you!\n\n## Resources\n\n- [gitlab-elements.sketch](https://gitlab.com/gitlab-org/gitlab-design/blob/master/production/resources/gitlab-elements.sketch)\n- [GitLab Brand.ai](https://brand.ai/git-lab/primary-brand)\n- [Design Repo](https://gitlab.com/gitlab-org/gitlab-design)\n",[959,1144,915],{"slug":7789,"featured":6,"template":678},"gitlab-design-library","content:en-us:blog:gitlab-design-library.yml","Gitlab Design Library","en-us/blog/gitlab-design-library.yml","en-us/blog/gitlab-design-library",{"_path":7795,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7796,"content":7801,"config":7807,"_id":7809,"_type":16,"title":7810,"_source":17,"_file":7811,"_stem":7812,"_extension":20},"/en-us/blog/illustrations-and-icons-on-gitlab-com",{"title":7797,"description":7798,"ogTitle":7797,"ogDescription":7798,"noIndex":6,"ogImage":4351,"ogUrl":7799,"ogSiteName":692,"ogType":693,"canonicalUrls":7799,"schema":7800},"Inside GitLab: Illustrations and icons on GitLab.com","Learn how our UX team creates icons and illustrations.","https://about.gitlab.com/blog/illustrations-and-icons-on-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside GitLab: Illustrations and icons on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hazel Yang\"}],\n        \"datePublished\": \"2017-12-04\",\n      }",{"title":7797,"description":7798,"authors":7802,"heroImage":4351,"date":7804,"body":7805,"category":14,"tags":7806},[7803],"Hazel Yang","2017-12-04","\nIn our 10.0 release, we introduced a [new navigation](/blog/unveiling-gitlabs-new-navigation/) complete with a redesigned color palette and icon set. We replaced [Font Awesome](http://fontawesome.io/icons/) with our own, SVG based, icon system, and we’ve also been hard at work on a series of illustrations to provide consistent visual language and improve our onboarding experience. Read on to find out more about how the UX team goes about creating new icons and illustrations.\n\n\u003C!-- more -->\n\nIllustrations and icons are powerful communication tools. They tell a story where words fail and can facilitate understanding across both language and culture barriers. Replacing text with illustrations and icons can make things clear at a glance. They also open up space and allow the eye to navigate more easily across the interface.\n\n## Illustrations\n\nA common mistake when designing a product is to assume that your users will understand how to use it. In reality, most users need a little help understanding where to start on their journey in order to discover all it has to offer. This is especially true for a product like GitLab, which is brimming with features. To assist users and [improve the onboarding experience](https://gitlab.com/gitlab-org/gitlab-ce/issues/15632), we decided to implement illustrations.\n\n### Defining the style\n\nTo begin, we reviewed our product’s existing styles to ensure that the illustrations we created would support a consistent brand experience for the application and our [official site](/).  During this review, we found that the visual design of these two products had diverged. 
The colors on our official website were vivid and energetic (orange and purple), while the colors of GitLab.com were soft and gentle (grey and white). Blending these two opposing styles into one set of illustrations was not going to be an easy task.\n\n{: .text-center}\n![gitlab-websites](https://about.gitlab.com/images/blogimages/illustrations-and-icons/gitlab-websites.png)\n\n### Visual consistency\n\nTo provide visual consistency across both products, we decided to pick up the primary (orange) and secondary (purple) colors from the official site for use in our illustrations. However, these two colors had a similar chroma and, used without modification, would create a jarring effect. Also, they just didn’t work well with the style of GitLab.com at the time. Our solution was to adjust the chroma of the two colors to generate new ones. These new colors played more harmoniously with the existing style of GitLab.com and allowed us to play with color in more creative ways.\n\n{: .text-center}\n![color-palettes](https://about.gitlab.com/images/blogimages/illustrations-and-icons/color-palettes.png)\n\n### Following GitLab values\n\n[Values](https://handbook.gitlab.com/handbook/values/) are important to us at GitLab. It was essential that our illustrations reflected these values and enhanced the brand experience to create a personal connection with our users. At GitLab we encourage people to maintain a positive attitude. Our illustrations needed to bring out a sense of playfulness, delight, and overall positivity.\n\n{: .text-center}\n![positive-illustration](https://about.gitlab.com/images/blogimages/illustrations-and-icons/positive-illustration.png){: .shadow}\n\nWe quickly found that these illustrations provided value as well as functionality. Used in an empty state, they inform users of features they may not know about and provide valuable onboarding. Used in error messaging, they quickly redirect users and get them back on track.\n\n{: .text-center}\n![errors-illustration](https://about.gitlab.com/images/blogimages/illustrations-and-icons/404.png){: .shadow}\n\nDiversity and inclusivity are essential to who we are as well. We have users, employees, and community members from many different cultural and geographical backgrounds. We reflected this variety of races, nationalities, and genders in the development of the illustrations for our [UX personas](https://design.gitlab.com/). We chose to use illustrations rather than stock photos. Illustrations make it easy to cover a variety of personas with no need to worry about copyrights.\n\n{: .text-center}\n![person-avatars](https://about.gitlab.com/images/blogimages/illustrations-and-icons/person-avatars.png){: .shadow}\n\nYou can find out more about our illustrations in the [handbook](https://docs.gitlab.com/ee/development/ux/).\n\n## Icons\n\nWhen GitLab was first in development, we chose Font Awesome as the primary icon set. It contained a variety of commonly used icons and was easy to implement. For an early-stage startup, it was a very useful tool.\n\nAs GitLab matured, we needed more and more custom icons. These custom icons were created by our designers and, when mixed in with Font Awesome, led to an inconsistent visual style. Adding to the problem was the fact that we didn’t have a guide for icon usage. The lack of guidance caused [inconsistent](https://gitlab.com/gitlab-org/gitlab-ce/issues/29584) and [duplicated](https://gitlab.com/gitlab-org/gitlab-ce/issues/19751) icon usage to occur frequently. 
It confused users and had a detrimental effect on usability.\n\n### Creating our icons\n\nIt was time to build a consistent visual style and eliminate the confusion by [creating a complete custom icon set](https://gitlab.com/gitlab-org/gitlab-ce/issues/32894). Using distinct iconography offered a powerful way to emphasize our unique personality.\n\n{: .text-center}\n![new-icon-set](https://about.gitlab.com/images/blogimages/illustrations-and-icons/new-icon-set.png){: .shadow}\n\nOnce again, consistency was key here. We gave our icons a thick border and rounded corners. Creating a consistent style between our illustrations and icons strengthened our brand identity by making it memorable and more easily recognizable.\n\nThick borders also help with accessibility. We were aware that some of our users adjusted their screen to higher resolutions, making an icon with a thin border harder to recognize. For this reason, we went with a `2x` width border.\n\n## The outcome\n\n### More recognizable and consistent visual language\n\nOur new color palette and icons on GitLab.com created a robust and consistent brand experience, making GitLab identifiable at a glance.\n\n### Illustrations for empty states and persona avatars\n\nMany of our empty state illustrations have been implemented, and we continue to develop more. You can see our avatar illustrations on [UX personas](https://design.gitlab.com/).\n\n{: .text-center}\n![example-empty-state](https://about.gitlab.com/images/blogimages/illustrations-and-icons/example-empty-state-issues.png){: .shadow}\n\n### Icons in contextual navigation and system notes\n\nWe have implemented most of our new icons on GitLab.com. You can find them in the [system notes](https://gitlab.com/gitlab-org/gitlab-ce/issues/24784) and [contextual navigation](https://gitlab.com/gitlab-org/gitlab-ce/issues/34027). Font Awesome will soon be completely phased out. We'd like to thank the Font Awesome team; their open source icon set allowed us to get very far, very fast!\n\n{: .text-center}\n![example-system-notes](https://about.gitlab.com/images/blogimages/illustrations-and-icons/system-notes.png){: .shadow}\n\n{: .text-center}\n![example-contextual-nav](https://about.gitlab.com/images/blogimages/illustrations-and-icons/contextual-nav-02.png){: .shadow}\n\n### Streamlined process with the use of SVGs\n\nAll of our illustrations and icons are now exported as SVG files. Our Frontend AC Lead [Tim Zallmann](/company/team/#tpmtim) created [GitLab SVGs](http://gitlab-org.gitlab.io/gitlab-svgs/), a repository to manage all SVG assets for GitLab. It creates SVG sprites out of icons and optimizes SVG-based illustrations. These are then exported to a live preview site. This enables the design team to add new icons and the frontend team to find icons quickly and easily.\n\n{: .text-center}\n![screenshot-gitlab-svgs](https://about.gitlab.com/images/blogimages/illustrations-and-icons/gitlab-svgs.png){: .shadow}\n\n## Conclusion\n\nYou will see GitLab's brand experience and UX design become more consistent and distinctive, and GitLab SVGs will soon be integrated into the [Design Library](https://gitlab.com/gitlab-org/gitlab-design/issues/26) we are working on. 
Stay tuned!\n",[959,1144,915],{"slug":7808,"featured":6,"template":678},"illustrations-and-icons-on-gitlab-com","content:en-us:blog:illustrations-and-icons-on-gitlab-com.yml","Illustrations And Icons On Gitlab Com","en-us/blog/illustrations-and-icons-on-gitlab-com.yml","en-us/blog/illustrations-and-icons-on-gitlab-com",{"_path":7814,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7815,"content":7821,"config":7826,"_id":7828,"_type":16,"title":7829,"_source":17,"_file":7830,"_stem":7831,"_extension":20},"/en-us/blog/support-engineering-at-gitlab",{"title":7816,"description":7817,"ogTitle":7816,"ogDescription":7817,"noIndex":6,"ogImage":7818,"ogUrl":7819,"ogSiteName":692,"ogType":693,"canonicalUrls":7819,"schema":7820},"At your service: Support Engineering at GitLab","A new series from GitLab Support Engineering about what we do and how we do it. All remotely of course!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678875/Blog/Hero%20Images/support-series-cover.png","https://about.gitlab.com/blog/support-engineering-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"At your service: Support Engineering at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Matos\"}],\n        \"datePublished\": \"2017-12-01\",\n      }",{"title":7816,"description":7817,"authors":7822,"heroImage":7818,"date":7823,"body":7824,"category":14,"tags":7825},[7035],"2017-12-01","\n\nHi! I’m [Lee Matos](/company/team/#leematos), [Support Team](/handbook/support/) Lead at GitLab and I’m very excited to be kicking off our blog series about what Support Engineering means at GitLab. One of the first questions people ask is, \"What’s the difference between Support Engineering and Customer Service?\" Great question! Let’s talk about it.\n\n\u003C!-- more -->\n\n## Support Engineering vs. Customer Service?\n\nTo start, I think Customer Service is a subset of Support Engineering. To be a great support engineer, you should be customer focused, but also technically minded. We address customers' needs via web calls and email daily. Those interactions are where a customer focus is paramount, but we’ll often be debugging Redis queues or finding slow SQL queries. This is not just relationship management. It’s sussing out the bugs and then squashing them. I think that’s pretty common for support engineering, but we have some unique quirks too.\n\n## What’s unique about Support Engineering at GitLab?\n\nAt GitLab, transparency is a core value. Because of that, our issue trackers are public. This is great for Support. In traditional support models, we act as a router between the company and the customer: Support is responsible for keeping the customer in the loop on the status of a bug fix by holding the ticket open until it gets resolved.\n\nWith our transparency, we get to act more like a pipe fitter. We connect the customer to the public issue, and from there they can see when it’s scheduled (and even when it gets delivered and by whom!) and, if they choose, they can engage directly with the team responsible. This is unprecedented access into product development. It also allows Support to be smart about making the connection, but to give the ownership to the actual team responsible for delivering the work. 
Speaking of which, let’s talk about the Support Team right now.\n\n## How big is the team?\n\nWe are currently 12 global hooligans and we are looking for more. We are finding our volume of requests is best served by people based in EMEA -> East Coast America, so we are targeting those regions to hire. This is great because everyone gets to work a “9-5,” but by leveraging remote work, we can easily get 24/5 coverage. This is huge.\n\nIf you are reading this and finding yourself interested in learning more, [we are hiring](/jobs/). We’d love to have you join our team if this sounds right for you.\n\nI’ll be writing more over the next months about how we stay connected remotely, how we communicate across teams, and how to make successful remote internships, amongst other things. I hope you’ll enjoy the journey!\n\n-Lee\n",[915],{"slug":7827,"featured":6,"template":678},"support-engineering-at-gitlab","content:en-us:blog:support-engineering-at-gitlab.yml","Support Engineering At Gitlab","en-us/blog/support-engineering-at-gitlab.yml","en-us/blog/support-engineering-at-gitlab",{"_path":7833,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7834,"content":7840,"config":7845,"_id":7847,"_type":16,"title":7848,"_source":17,"_file":7849,"_stem":7850,"_extension":20},"/en-us/blog/containers-kubernetes-basics",{"title":7835,"description":7836,"ogTitle":7835,"ogDescription":7836,"noIndex":6,"ogImage":7837,"ogUrl":7838,"ogSiteName":692,"ogType":693,"canonicalUrls":7838,"schema":7839},"Kubernetes & containers, and where cloud native fits in – the basics","Brush up on your understanding of these concepts key to modern development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671296/Blog/Hero%20Images/containers-kubernetes-basics.jpg","https://about.gitlab.com/blog/containers-kubernetes-basics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kubernetes & containers, and where cloud native fits in – the basics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-11-30\",\n      }",{"title":7835,"description":7836,"authors":7841,"heroImage":7837,"date":7842,"body":7843,"category":14,"tags":7844},[4182],"2017-11-30","\n\nWe throw around terms like Kubernetes, containers, and cloud native with some abandon, but sometimes take it for granted that everyone knows what's what. So here we go...\n\n\u003C!-- more -->\n\n## Container explainer\n\nA container is a method of operating system-based virtualization that allows\nyou to securely run an application and its dependencies independently without\nimpacting other containers or the operating system.\n\nBefore containers, it was common to use virtual machines (VMs) to provide a safe, sandboxed environment in which to test software. A container works much like a virtual machine except that, instead of packaging\nyour code with a full operating system, your application runs as an isolated Linux process\nsharing the host's kernel. This means that each container only contains the code and dependencies needed to run that specific application, making containers smaller and faster to run.\n\n![Containers vs virtual machines vs bare metal](https://about.gitlab.com/images/blogimages/containers-vm-bare-metal.png){: .medium.center}\n\n*\u003Csmall>Containers retain the same repeatability factor as virtual machines, but are much faster and use fewer resources to run.\u003C/small>*\n\n## Kuber... 
what?\n\nKubernetes is primarily a container scheduler – an open source platform designed to automate the management of application containers, from deployment and scaling to operations.\n\nWhile virtualization technology statically partitions your servers into smaller VMs, Kubernetes allows you to partition as you go, depending on how many or how few resources are needed at the time, scaling up and down as necessary. You can respond quickly and efficiently to customer demand while limiting hardware usage and minimizing disruption to feature rollouts. With container schedulers, the focus shifts from the machine to the service – the machine becomes an ephemeral, disposable element.\n\nWhat's more, using containers in this way means they are decoupled from the host filesystem and underlying infrastructure, making them portable across clouds and operating systems.\n\n## Containers + Kubernetes \u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i> cloud native\n\nWhich brings us to [cloud native development](/topics/cloud-native/). Cloud native applications embrace a new approach to building and running applications that takes full advantage of the cloud computing model and container schedulers such as Kubernetes.\n\nNot to be confused with running traditional applications in the cloud, cloud native means that applications are purpose-built for the cloud and consist of loosely coupled services. Applications are re-architected for running in the cloud – shifting the focus away from the machine to the service instead. Cloud native acknowledges that the cloud is about more than just who manages your servers – it is the next step in digital transformation.\n\nBy building applications that can run on any cloud, right out of the box, you’re free to migrate and distribute across vendors in line with your budget and business priorities. You also free up developer time – they don’t have to write code to run and scale across a range of cloud infrastructures, so they can focus on improvements and new features.\n\nSound good? We think so! 
Visit [about.gitlab.com/kubernetes](/solutions/kubernetes/) to learn more about how GitLab and Kubernetes can get you to cloud native nirvana.\n\n[Cover image](https://unsplash.com/@guibolduc?photo=uBe2mknURG4) by [Guillaume Bolduc](https://unsplash.com/@guibolduc) on Unsplash\n{: .note}\n",[1002,873],{"slug":7846,"featured":6,"template":678},"containers-kubernetes-basics","content:en-us:blog:containers-kubernetes-basics.yml","Containers Kubernetes Basics","en-us/blog/containers-kubernetes-basics.yml","en-us/blog/containers-kubernetes-basics",{"_path":7852,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7853,"content":7859,"config":7865,"_id":7867,"_type":16,"title":7868,"_source":17,"_file":7869,"_stem":7870,"_extension":20},"/en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss",{"title":7854,"description":7855,"ogTitle":7854,"ogDescription":7855,"noIndex":6,"ogImage":7856,"ogUrl":7857,"ogSiteName":692,"ogType":693,"canonicalUrls":7857,"schema":7858},"Go tools and GitLab: How to do continuous integration like a boss","How the team at Pantomath makes their lives easier with GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667978/Blog/Hero%20Images/go-tools-and-gitlab.jpg","https://about.gitlab.com/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Go tools and GitLab: How to do continuous integration like a boss\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Julien Andrieux\"}],\n        \"datePublished\": \"2017-11-27\",\n      }",{"title":7854,"description":7855,"authors":7860,"heroImage":7856,"date":7862,"body":7863,"category":14,"tags":7864},[7861],"Julien Andrieux","2017-11-27","\n\nAt [Pantomath](https://pantomath.io/), we use [GitLab](/) for all our development work. The purpose of this post is not to present GitLab and all [its features](/pricing/feature-comparison/), but to show how we use these tools to make our lives easier. So what is it all about? Automating everything related to your development project, so you can focus on your code.\n\n\u003C!-- more -->\n\nWe'll cover [linting](https://en.wikipedia.org/wiki/Lint_(software)), [unit tests](https://en.wikipedia.org/wiki/Unit_testing), [data races](https://en.wikipedia.org/wiki/Race_condition), the [memory sanitizer](https://clang.llvm.org/docs/MemorySanitizer.html), [code coverage](https://en.wikipedia.org/wiki/Code_coverage), and the build.\n\nAll the source code shown in this post is available at [gitlab.com/pantomath-io/demo-tools](https://gitlab.com/pantomath-io/demo-tools). So feel free to get the repository, and use the tags to navigate it. The repository should be placed in the `src` folder of your `$GOPATH`:\n\n```bash\n$ go get -v -d gitlab.com/pantomath-io/demo-tools\n$ cd $GOPATH/src/gitlab.com/pantomath-io/demo-tools\n```\n\n### Go tools\n\nLuckily, `Go` — the open source programming language also known as golang — comes with a [lot of useful tools](https://golang.org/cmd/go/) to build, test, and check your code. In fact, it's all there. We'll just add extra tools to glue them together. But before we go there, we need to take them one by one, and see what they do.\n\n#### Package list\n\nYour Go project is a collection of packages, as described in the [official doc](https://golang.org/doc/code.html). 
Most of the following tools will be fed with these packages, and thus the first command we need is a way to list the packages. Fortunately, the `Go` language has us covered with the `list` subcommand ([read the fine manual](https://golang.org/cmd/go/#hdr-List_packages) and this [excellent post from Dave Cheney](https://dave.cheney.net/2014/09/14/go-list-your-swiss-army-knife)):\n\n```bash\n$ go list ./...\n```\n\nNote that we want to avoid applying our tools on external packages or resources, and restrict them to **our** code. So we need to get rid of the [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories):\n\n```bash\n$ go list ./... | grep -v /vendor/\n```\n\n#### Lint\n\nThis is the very first tool we use on the code: the linter. Its role is to make sure that the code respects the code style. This may sound like an optional tool, or at least a “nice-to-have”, but it really helps to keep a consistent style across your project.\n\nThis linter is not part of Go *per se*, so you need to grab it and install it by hand (see [official doc](https://github.com/golang/lint)).\n\nThe usage is fairly simple: you just run it on the packages of your code (you can also point it at individual `.go` files):\n\n```bash\n$ golint -set_exit_status $(go list ./... | grep -v /vendor/)\n```\n\nNote the `-set_exit_status` option. By default, `golint` only prints the style issues, and returns (with a 0 return code), so the CI never considers something went wrong. If you specify `-set_exit_status`, the return code from `golint` will be different from 0 if any style issue is encountered.\n\n#### Unit test\n\nThese are the most common tests you can run on your code. For each `.go` file, we need to have an associated `_test.go` file holding the unit tests. You can run the tests for all the packages with the following command:\n\n```bash\n$ go test -short $(go list ./... | grep -v /vendor/)\n```\n\n#### Data race\n\nThis is usually a hard subject to cover, but the `Go` tooling supports it by default (though only on `linux/amd64`, `freebsd/amd64`, `darwin/amd64` and `windows/amd64`). For more information about data races, see [this article](https://golang.org/doc/articles/race_detector.html). Meanwhile, here is how to run the detector:\n\n```bash\n$ go test -race -short $(go list ./... | grep -v /vendor/)\n```\n\n#### Memory sanitizer\n\nClang has a nice detector for uninitialized reads called [MemorySanitizer](https://clang.llvm.org/docs/MemorySanitizer.html). The `go test` tool is kind enough to interact with this Clang module, as long as you are on a `linux/amd64` host and using a recent version of Clang/LLVM (`>=3.8.0`). Here is how to run it:\n\n```bash\n$ go test -msan -short $(go list ./... | grep -v /vendor/)\n```\n\n#### Code coverage\n\nThis is also a must-have to evaluate the health of your code, and to see which parts of the code are covered by unit tests and which are not. [Rob Pike](https://twitter.com/rob_pike) wrote a [full post on that very subject](https://blog.golang.org/cover).\n\nTo calculate the code coverage ratio, we need to run the following script:\n\n```bash\n$ PKG_LIST=$(go list ./... 
| grep -v /vendor/)\n$ for package in ${PKG_LIST}; do\n    go test -covermode=count -coverprofile \"cover/${package##*/}.cov\" \"$package\" ;\ndone\n$ tail -q -n +2 cover/*.cov >> cover/coverage.cov\n$ go tool cover -func=cover/coverage.cov\n```\n\nIf we want to get the coverage report in HTML format, we need to add the following command:\n\n```bash\n$ go tool cover -html=cover/coverage.cov -o coverage.html\n```\n\n#### Build\n\nLast but not least, once the code has been fully tested, we might want to compile it to make sure we can build a working binary.\n\n```bash\n$ go build -i -v gitlab.com/pantomath-io/demo-tools\n```\n\n### Makefile\n\n*git tag:* [init-makefile](https://gitlab.com/pantomath-io/demo-tools/tags/init-makefile)\n\n![](https://cdn-images-1.medium.com/max/1600/1*Ip_q_6I-kNpUjuPMOutuTA.jpeg)\n*\u003Csmall>Photo by [Matt Artz](https://unsplash.com/photos/qJE5Svhs2ek?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\u003C/small>*\n\nNow that we have all the tools we may use in the context of continuous integration, we can wrap them all in a [Makefile](https://gitlab.com/pantomath-io/demo-tools/blob/init-makefile/Makefile), and have a consistent way to call them.\n\nThe purpose of this doc is not to present `make`, but you can refer to the [official documentation](https://www.gnu.org/software/make/manual/make.html) to learn more about it.\n\n    PROJECT_NAME := \"demo-tools\"\n    PKG := \"gitlab.com/pantomath-io/$(PROJECT_NAME)\"\n    PKG_LIST := $(shell go list ${PKG}/... | grep -v /vendor/)\n    GO_FILES := $(shell find . -name '*.go' | grep -v /vendor/ | grep -v _test.go)\n\n    .PHONY: all dep build clean test race msan coverage coverhtml lint help\n\n    all: build\n\n    lint: ## Lint the files\n      @golint -set_exit_status ${PKG_LIST}\n\n    test: ## Run unittests\n      @go test -short ${PKG_LIST}\n\n    race: dep ## Run data race detector\n      @go test -race -short ${PKG_LIST}\n\n    msan: dep ## Run memory sanitizer\n      @go test -msan -short ${PKG_LIST}\n\n    coverage: ## Generate global code coverage report\n      ./tools/coverage.sh;\n\n    coverhtml: ## Generate global code coverage report in HTML\n      ./tools/coverage.sh html;\n\n    dep: ## Get the dependencies\n      @go get -v -d ./...\n\n    build: dep ## Build the binary file\n      @go build -i -v $(PKG)\n\n    clean: ## Remove previous build\n      @rm -f $(PROJECT_NAME)\n\n    help: ## Display this help screen\n      @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n\nWhat do we have now? One target for each tool presented above, and three more targets for:\n\n* installation of dependencies (`dep`);\n* housekeeping of the project (`clean`);\n* some nice and shiny help (`help`).\n\nNote that we also had to create a script for the code coverage work. This is because implementing loops over files in a Makefile is a pain. So the work is done in a `bash` script, and the Makefile only triggers this script (a sketch of what such a script can look like follows after the commands below).\n\nYou can try the Makefile with the following commands:\n\n    $ make help\n    $ make lint\n    $ make coverage\n
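\nAs promised, here is a hedged sketch of what the `tools/coverage.sh` helper triggered by the `coverage` and `coverhtml` targets might look like. It is an illustration assembled from the commands shown earlier, not necessarily the exact script from the repository:\n\n```bash\n#!/usr/bin/env bash\n# Sketch of tools/coverage.sh: build one coverage profile per package, then\n# merge them into a single report. Pass \"html\" to also emit coverage.html.\nset -e\n\nPKG_LIST=$(go list ./... | grep -v /vendor/)\nmkdir -p cover\n\n# One coverage profile per package.\nfor package in ${PKG_LIST}; do\n    go test -covermode=count -coverprofile \"cover/${package##*/}.cov\" \"$package\"\ndone\n\n# Merge the profiles; \"tail -n +2\" drops each file's \"mode:\" header, which we\n# write once at the top instead.\necho \"mode: count\" > cover/coverage.cov\ntail -q -n +2 cover/*.cov >> cover/coverage.cov\n\ngo tool cover -func=cover/coverage.cov\nif [ \"$1\" = \"html\" ]; then\n    go tool cover -html=cover/coverage.cov -o coverage.html\nfi\n```\n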
\n### Continuous integration\n\n*git tag:* [init-ci](https://gitlab.com/pantomath-io/demo-tools/tags/init-ci)\n\nNow that the tools are in place and we can run various tests on our code, we’d like to automate all of this for every push to the repository. Luckily, GitLab offers [CI pipelines](/solutions/continuous-integration/) just for this. And the setup is pretty straightforward: all you need to create is a `.gitlab-ci.yml` file at the root of the repository.\n\nThe [full documentation](https://docs.gitlab.com/ee/ci/yaml/) on this YAML file presents all the options, but you can start with this `.gitlab-ci.yml`:\n\n```yaml\nimage: golang:1.9\n\ncache:\n  paths:\n    - /apt-cache\n    - /go/src/github.com\n    - /go/src/golang.org\n    - /go/src/google.golang.org\n    - /go/src/gopkg.in\n\nstages:\n  - test\n  - build\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.com/pantomath-io /go/src/_/builds\n  - cp -r $CI_PROJECT_DIR /go/src/gitlab.com/pantomath-io/pantomath\n  - ln -s /go/src/gitlab.com/pantomath-io /go/src/_/builds/pantomath-io\n  - make dep\n\nunit_tests:\n  stage: test\n  script:\n    - make test\n\nrace_detector:\n  stage: test\n  script:\n    - make race\n\nmemory_sanitizer:\n  stage: test\n  script:\n    - make msan\n\ncode_coverage:\n  stage: test\n  script:\n    - make coverage\n\ncode_coverage_report:\n  stage: test\n  script:\n    - make coverhtml\n  only:\n  - master\n\nlint_code:\n  stage: test\n  script:\n    - make lint\n\nbuild:\n  stage: build\n  script:\n    - make\n```\n\nIf you break down the file, here are some explanations on its content:\n\n* The first thing is to choose which Docker image will be used to run the CI. Head to the [Docker Hub](https://hub.docker.com/) to choose the right image for your project.\n* Then, you specify some folders of this image [to be cached](https://docs.gitlab.com/ee/ci/yaml/#cache). The goal here is to avoid downloading the same content several times. Once a job is completed, the listed paths will be archived, and the next job will use the same archive.\n* You define the different `stages` that will group your jobs. In our case, we have two [stages](https://docs.gitlab.com/ee/ci/yaml/#stages) (to be processed in that order): `test` and `build`. We could have other stages, such as `deploy`.\n* The `before_script` [section](https://docs.gitlab.com/ee/ci/yaml/#before_script) defines the commands to run in the Docker container right before each job’s own script runs. In our context, the commands just copy and link the repository into the `$GOPATH`, and install the dependencies.\n* Then come the actual [jobs](https://docs.gitlab.com/ee/ci/jobs/), using the `Makefile` targets. Note the special case for `code_coverage_report`, where execution is restricted to the `master` branch (we don’t want to update the code coverage report from feature branches, for instance).\n\nAs soon as we commit and push the `.gitlab-ci.yml` file to the repository, the CI is [automatically triggered](https://gitlab.com/pantomath-io/demo-tools/pipelines/13481935). And the pipeline fails. 
How come?\n\nThe `lint_code` [job](https://gitlab.com/pantomath-io/demo-tools/-/jobs/38690212) fails because it can’t find the `golint` binary:\n\n```bash\n$ make lint\nmake: golint: Command not found\nMakefile:11: recipe for target 'lint' failed\nmake: *** [lint] Error 127\n```\n\nSo, [update](https://gitlab.com/pantomath-io/demo-tools/commit/17a0206eb626504e559f56773e2d81c7b5808dbe) your `Makefile` to install `golint` as part of the `dep` target.\n\nThe `memory_sanitizer` [job](https://gitlab.com/pantomath-io/demo-tools/-/jobs/38690209) fails because `gcc` complains:\n\n```bash\n$ make msan\n# runtime/cgo\ngcc: error: unrecognized argument to -fsanitize= option: 'memory'\nMakefile:20: recipe for target 'msan' failed\nmake: *** [msan] Error 2\n```\n\nBut remember: we need to use Clang/LLVM `>=3.8.0` to enjoy the `-msan` option of the `go test` command.\n\nWe have two options here:\n\n* either we set up Clang in the job (using `before_script`);\n* or we use a Docker image with Clang installed by default.\n\nThe first option is nice, but it implies doing this setup **for every single job**. That would make every pipeline slower, so we should do it once and for all. We therefore prefer the second option, which is also a good way to play with [GitLab Registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\n\n*git tag:* [use-own-docker](https://gitlab.com/pantomath-io/demo-tools/tags/use-own-docker)\n\nWe need to create a [Dockerfile](https://gitlab.com/pantomath-io/demo-tools/blob/use-own-docker/Dockerfile) for the container (as usual, read the [official documentation](https://docs.docker.com/engine/reference/builder) for more options):\n\n    # Base image:\n    FROM golang:1.9\n    MAINTAINER Julien Andrieux \u003Cjulien@pantomath.io>\n\n    # Install golint\n    ENV GOPATH /go\n    ENV PATH ${GOPATH}/bin:$PATH\n    RUN go get -u github.com/golang/lint/golint\n\n    # Add apt key for LLVM repository\n    RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -\n\n    # Add LLVM apt repository\n    RUN echo \"deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch-5.0 main\" | tee -a /etc/apt/sources.list\n\n    # Install clang from LLVM repository\n    RUN apt-get update && apt-get install -y --no-install-recommends \\\n        clang-5.0 \\\n        && apt-get clean \\\n        && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\n    # Set Clang as default CC\n    ENV set_clang /etc/profile.d/set-clang-cc.sh\n    RUN echo \"export CC=clang-5.0\" | tee -a ${set_clang} && chmod a+x ${set_clang}\n\nThe container built from this Dockerfile is based on the [golang:1.9](https://hub.docker.com/_/golang/) image (the one referenced in the `.gitlab-ci.yml` file).\n\nWhile we’re at it, we install `golint` in the container, so we have it available. Then we follow the [official way](http://apt.llvm.org/) of installing Clang 5.0 from the LLVM repository.\n\nNow that we have the Dockerfile in place, we need to build the container image and make it available for GitLab:\n\n```bash\n$ docker login registry.gitlab.com\n$ docker build -t registry.gitlab.com/pantomath-io/demo-tools .\n$ docker push registry.gitlab.com/pantomath-io/demo-tools\n```\n\nThe first command connects you to the GitLab Registry. Then you build the container image described in the Dockerfile. And finally, you push it to the GitLab Registry.\n\nTake a look at the [Registry for your repository](https://gitlab.com/pantomath-io/demo-tools/container_registry) and you’ll see your image, ready to be used. 
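To double-check the push, you can pull the image back from the Registry (a quick sanity test; `latest` is the tag `docker build` assigned by default):\n\n```bash\n$ docker pull registry.gitlab.com/pantomath-io/demo-tools:latest\n```\n\n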
And to have the CI use your image, you just need to update the `.gitlab-ci.yml` file:\n\n    image: golang:1.9\n\nbecomes\n\n    image: registry.gitlab.com/pantomath-io/demo-tools:latest\n\nOne last detail: you need to tell the CI to use the proper compiler (i.e. the `CC` environment variable), so we add the variable initialization in the `.gitlab-ci.yml` file:\n\n    export CC=clang-5.0\n\nOnce the modifications are done, the next commit will trigger the pipeline, which now works:\n\n[gitlab.com/pantomath-io/demo-tools/pipelines/13497136](https://gitlab.com/pantomath-io/demo-tools/pipelines/13497136)\n\n### Badges\n\n*git tag:* [init-badges](https://gitlab.com/pantomath-io/demo-tools/tags/init-badges)\n\n![](https://cdn-images-1.medium.com/max/1600/1*0pY_6oCiHZ_eLh0vfg5rDA.jpeg)\n\n*\u003Csmall>Photo by [Jakob Owens](https://unsplash.com/photos/ZBadHaTUkP0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\u003C/small>*\n\nNow that the tools are in place, every commit will launch a test suite, and you probably want to show it off; that’s legitimate :) The best way to do so is to use badges, and the best place for them is the `README` [file](https://gitlab.com/pantomath-io/demo-tools/blob/init-badges/README.md).\n\nEdit it and add the following four badges:\n\n* Build Status: the status of the last pipeline on the `master` branch:\n\n```\n[![Build Status](https://gitlab.com/pantomath-io/demo-tools/badges/master/build.svg)](https://gitlab.com/pantomath-io/demo-tools/commits/master)\n```\n\n* Coverage Report: the percentage of source code covered by tests\n\n```\n[![Coverage Report](https://gitlab.com/pantomath-io/demo-tools/badges/master/coverage.svg)](https://gitlab.com/pantomath-io/demo-tools/commits/master)\n```\n\n* Go Report Card:\n\n```\n[![Go Report Card](https://goreportcard.com/badge/gitlab.com/pantomath-io/demo-tools)](https://goreportcard.com/report/gitlab.com/pantomath-io/demo-tools)\n```\n\n* License:\n\n```\n[![License MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://img.shields.io/badge/License-MIT-brightgreen.svg)\n```\n\nThe coverage report needs a special configuration. You need to tell GitLab how to get that information, considering that there is a job in the CI that *displays* it when it runs.\u003Cbr> There is a [configuration](https://gitlab.com/help/user/project/pipelines/settings#test-coverage-parsing) to provide GitLab with a regexp, used on any job’s output. If the regexp matches, GitLab considers the match to be the code coverage result.\n\nSo head to `Settings > CI/CD` in your repository, scroll down to the `Test coverage parsing` setting in the `General pipelines settings` section, and use the following regexp:\n\n    total:\\s+\\(statements\\)\\s+(\\d+\\.\\d+\\%)\n\nYou’re all set! Head to the [overview of your repository](https://gitlab.com/pantomath-io/demo-tools/tree/init-badges), and look at your `README`:\n\n### Conclusion\n\nWhat’s next? Probably more tests in your CI. You can also look at CD ([Continuous Deployment](https://docs.gitlab.com/ee/ci/environments/index.html)) to automate the deployment of your builds. The documentation can be done using [GoDoc](https://godoc.org/-/about). Note that you generate a coverage report with the `code_coverage_report` job, but don’t use it in the CI. 
You can make the job copy the HTML file to a web server, using `scp` (see this [documentation](https://docs.gitlab.com/ee/ci/ssh_keys/) on how to use SSH keys).\n\nMany thanks to [Charles Francoise](https://dev.to/loderunner) who co-wrote this paper and [gitlab.com/pantomath-io/demo-tools](https://gitlab.com/pantomath-io/demo-tools).\n\n## About the Guest Author\n\nJulien Andrieux is currently working on Pantomath. Pantomath is a modern, open source monitoring solution, built for performance, that bridges the gaps across all levels of your company. The wellbeing of your infrastructure is everyone’s business. [Keep up with the project](http://goo.gl/tcxtXq).\n\n *[Go tools & GitLab - how to do Continuous Integration like a boss](https://medium.com/pantomath/go-tools-gitlab-how-to-do-continuous-integration-like-a-boss-941a3a9ad0b6) was originally published on Medium.*\n\n*Cover photo by [Todd Quackenbush](https://unsplash.com/photos/IClZBVw5W5A?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)*\n{: .note}\n",[110,4440,726],{"slug":7866,"featured":6,"template":678},"go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss","content:en-us:blog:go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss.yml","Go Tools And Gitlab How To Do Continuous Integration Like A Boss","en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss.yml","en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss",{"_path":7872,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7873,"content":7879,"config":7885,"_id":7887,"_type":16,"title":7888,"_source":17,"_file":7889,"_stem":7890,"_extension":20},"/en-us/blog/autoscale-ci-runners",{"title":7874,"description":7875,"ogTitle":7874,"ogDescription":7875,"noIndex":6,"ogImage":7876,"ogUrl":7877,"ogSiteName":692,"ogType":693,"canonicalUrls":7877,"schema":7878},"Autoscale GitLab CI/CD runners and save 90% on EC2 costs","Guest author Max Woolf shows how his team makes big savings with an autoscaling cluster of GitLab CI/CD runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680305/Blog/Hero%20Images/autoscale-gitlab-ci-runners.jpg","https://about.gitlab.com/blog/autoscale-ci-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Autoscale GitLab CI/CD runners and save 90% on EC2 costs\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Max Woolf\"}],\n        \"datePublished\": \"2017-11-23\",\n      }",{"title":7874,"description":7875,"authors":7880,"heroImage":7876,"date":7882,"body":7883,"category":14,"tags":7884},[7881],"Max Woolf","2017-11-23","\n\nAt [Substrakt Health](https://substrakthealth.com/), we use continuous integration workers to test our software every time new code is written and pushed, but that computing capacity can be expensive and hard to predict. This tutorial shows you how to set up an autoscaling [cluster of GitLab CI/CD](/topics/ci-cd/) runners using docker-machine and AWS.\n\n\u003C!-- more -->\n\nCode quality is **always** a top priority for us. We want to know that our code works every time and when it stops working we want to know immediately. We use [GitLab CI/CD](/solutions/continuous-integration/) to run our tests every time we push new code and before every deployment. 
GitLab CI/CD lets us split this work across multiple servers and scale capacity up and down as required to keep costs down. This tutorial will show you how to set up an autoscaling CI/CD cluster for GitLab and save up to 90 percent on costs using AWS EC2 Spot Instances.\n\nGitLab CI/CD allows us to split our jobs across multiple machines. By default, each new worker node requires some setup work to provision and attach it to our GitLab instance, but we can also use the autoscaling mode to provision a single machine, let that machine decide how much capacity is required, and then spin further instances up or down as needed.\n\n>**A warning**: This tutorial will not be covered entirely by the AWS free usage tier. It’s going to cost money to try this out.\n\n## Creating the spawner\n\nFirst off, we need a spawner machine. This runs 24/7 and checks that GitLab CI/CD has enough capacity to run the jobs currently in the queue. **It doesn’t run any jobs itself.**\n\nWe use Ubuntu 16.04 LTS for our internal tooling, so just create an EC2 instance (*t2.micro* is enough and is included in the free tier). Setting up VPCs and related subnets is out of the scope of this article, so we’ll assume that you’re working in the default VPC. Then we need to install a bunch of software on our machine to set it up.\n\n## Installing gitlab-runner\n\ngitlab-runner is the main software we need to complete this task. Installing it on Ubuntu is really easy.\n\n```\ncurl -L https://packages.gitlab.com/install/repositories/runner/gitlab-ci-multi-runner/script.deb.sh | sudo bash\n```\n\n```\nsudo apt-get install gitlab-ci-multi-runner\n```\n\n\u003Cimg src=\"/images/blogimages/auto-scale-ci-runners-gif.gif\" alt=\"Installing gitlab-runner\" style=\"width: 700px;\"/>{: .shadow}\n\nOnce you’ve done that, register the runner on your GitLab instance. Do this as you normally would with any other GitLab CI/CD runner, but choose **docker+machine** as the executor. Docker Machine is the software required to spin up new virtual machines and install Docker on them.\n\n## Installing Docker Machine\n\nDocker Machine is a handy bit of software that allows one host running Docker to spin up and provision other machines running Docker. Installing it is even easier:\n\n```\ncurl -L https://github.com/docker/machine/releases/download/v0.12.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine &&\nchmod +x /tmp/docker-machine &&\nsudo cp /tmp/docker-machine /usr/local/bin/docker-machine\n```\n\nThis will install the docker-machine binary in your PATH.\n\n## Configuring gitlab-runner\n\nBy default, gitlab-runner will not work in the autoscaling mode we want; it’ll just run each job itself and then stop. We want to configure this machine to no longer run tests but to spin up new Docker Machines as and when necessary. Open your gitlab-runner config file, usually found in `/etc/gitlab-runner/config.toml`, and make some changes. This is our example (with sensitive information removed). 
Let’s go through some of the important lines.\n\n```\nconcurrent = 12\ncheck_interval = 0\n\n[[runners]]\n  name = \"aws-gitlab-runner-spawner\"\n  limit = 6\n  url = \"https://git.substrakt.com/ci\"\n  token = \"xxxxx\"\n  executor = \"docker+machine\"\n  [runners.docker]\n    tls_verify = false\n    image = \"ruby:2.3.1\"\n    privileged = true\n    disable_cache = false\n    volumes = [\"/cache\"]\n    shm_size = 0\n  [runners.machine]\n    IdleCount = 0\n    MachineDriver = \"amazonec2\"\n    MachineName = \"runner-%s\"\n    MachineOptions = [\"amazonec2-access-key=XXXX\", \"amazonec2-secret-key=XXXX\", \"amazonec2-ssh-user=ubuntu\", \"amazonec2-region=eu-west-2\", \"amazonec2-instance-type=m4.xlarge\", \"amazonec2-ami=ami-996372fd\", \"amazonec2-vpc-id=vpc-xxxxx\", \"amazonec2-subnet-id=subnet-xxxxx\", \"amazonec2-zone=a\", \"amazonec2-root-size=32\", \"amazonec2-request-spot-instance=true\", \"amazonec2-spot-price=0.03\"]\n    IdleTime = 1800\n```\n\n```\nconcurrent = 12\n```\n\nThis tells GitLab CI/CD that, in total, it should attempt to run 12 jobs simultaneously across all child workers.\n\n```\nlimit = 6\n```\n\nThis tells GitLab CI/CD to use a maximum of six worker nodes for running jobs. You’ll need to tweak this value depending on the resources your jobs need and the resources of your child nodes. There’s no right answer here, but generally we found it wasn’t a good idea to run more jobs per node than the number of CPUs minus one. Again, this is a bit of a ‘finger-in-the-air’ calculation, as it really depends on your tech stack.\n\n```\nIdleCount = 0\n```\n\nThis tells GitLab CI/CD not to keep any machines running constantly whilst idle. When nobody is running a job and no jobs are queued, all of the worker nodes are spun down after an amount of time (`IdleTime`, at the bottom of the file). We power our nodes down after half an hour of no use. This does have the consequence of there being a short wait when we start our day, but it saves us money as we’re not using computing power when it’s not required.\n\nIf you're interested in how `concurrent`, `limit` and `IdleCount` define the maximum number of jobs and nodes that will be used, you can find a more detailed description in the Runner's autoscale configuration documentation: [Autoscaling algorithm and parameters](https://docs.gitlab.com/runner/configuration/autoscale.html#autoscaling-algorithm-and-parameters), [How parameters generate the upper limit of running machines](https://docs.gitlab.com/runner/configuration/autoscale.html#how-concurrent-limit-and-idlecount-generate-the-upper-limit-of-running-machines).\n\n```\nMachineOptions = [\"amazonec2-access-key=XXXX\", \"amazonec2-secret-key=XXXX\", \"amazonec2-ssh-user=ubuntu\", \"amazonec2-region=eu-west-2\", \"amazonec2-instance-type=m4.xlarge\", \"amazonec2-ami=ami-996372fd\", \"amazonec2-vpc-id=vpc-xxxxx\", \"amazonec2-subnet-id=subnet-xxxxx\", \"amazonec2-zone=a\", \"amazonec2-root-size=32\", \"amazonec2-request-spot-instance=true\", \"amazonec2-spot-price=0.03\"]\n```\n\nThis is where the magic happens: it is where we set our options for Docker Machine. It defines the size, type and price of our runners. I’ll run through each of the non-obvious options.\n\n```\namazonec2-vpc-id=vpc-xxxxx & amazonec2-subnet-id=subnet-xxxxx\n```\n\nThese are the VPC and associated subnet IDs. Generally, you’d want this in your default VPC in a public subnet. 
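If you don't have those IDs handy, the AWS CLI can list them (a sketch, assuming the CLI is installed and configured for your account):\n\n```\naws ec2 describe-vpcs --query 'Vpcs[].VpcId'\naws ec2 describe-subnets --filters Name=vpc-id,Values=vpc-xxxxx --query 'Subnets[].SubnetId'\n```\n\n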
We run our jobs in a private VPC with internal peering connections to other VPCs due to regulatory constraints.\n\n```\namazonec2-region=eu-west-2\n```\n\nThis is the AWS region. We run all of our infrastructure in the EU (London) region.\n\n```\namazonec2-instance-type=m4.xlarge\n```\n\nThis is the size of the instance we want for each of our runners. This setting can have massive implications for cost, and it can be a tricky balancing act. Choose too small and your jobs take forever to run due to a lack of resources (more time = more money), but choose too large and you have unused compute capacity which costs you money you don’t need to spend. Again, there’s no right answer here; it’s about what works for your workload. We found m4.xlarge works for us.\n\n## Save up to 90 percent on EC2 costs using Spot Instances\n\nSpot Instances are magic. They allow us to bid for unused capacity in the AWS infrastructure, which often means that EC2 costs are dramatically lower. We’re currently seeing discounts of around 85 percent on our EC2 bills due to using Spot Instances. Setting them up for use on GitLab CI/CD is really easy too. There is (of course) a downside: if our bid price for VMs is exceeded, then our instances shut down with only a few minutes’ notice. But as long as our bid is high enough, this isn’t an issue. Pricing in the spot market is insanely complex, but in eu-west-2 at least, prices for m4.large and m4.xlarge instances appear to have been static for months, so a bid 10-20 percent higher than the current spot price appears to be a safe bet. Just keep your eyes peeled. The current spot price for an m4.xlarge instance is $0.026. We’ve set our maximum price at $0.03 to give us some wiggle room. At the time of writing, the on-demand price is $0.232. The numbers speak for themselves.\n\n>Note: Spot pricing can vary significantly between instance sizes, regions and even availability zones in the same region. This guide assumes that spot pricing won’t vary massively or that you’ve set a good buffer above the current spot price to avoid outages.\n\n```\namazonec2-request-spot-instance=true & amazonec2-spot-price=0.03\n```\n\nThis tells GitLab CI/CD that instead of just spawning new EC2 instances at full price, it should request Spot Instances at the current spot price, setting a maximum bid per hour that it should not exceed, in USD, regardless of what currency you’re billed in (we’re billed in GBP, but Spot Instances are still calculated in USD). The maximum bid is whatever you’re comfortable paying. We tend to set it close to the on-demand price because we’re looking for any discount. As long as we’re not paying more than we otherwise would, it’s fine with us. Your financial constraints may affect your decisions differently.\n\n>Update: From October, AWS will charge by the second, rather than by the hour, making the potential savings even higher for partial hours of use.\n\nWe’d love to see how you get along with this, so please let us know. You can contact me max [at] substrakthealth [dot] com. For us, it’s saved us time and money, and that’s never a bad thing.\n\n## About the Guest Author\n\nMax Woolf is a Senior Developer at Substrakt Health. 
Based in the UK, they use innovative technology to transform how primary care providers organize and deliver care to patients in a sustainable NHS.\n\n_[Autoscale GitLab CI runners and save 90% on EC2 costs](https://substrakthealth.com/autoscale-gitlab-ci-runners-and-save-90-on-ec2-costs/) was originally published on Substrakt Health's blog._\n\nCover image by [Sebastien Gabriel](https://unsplash.com/@sgabriel) on Unsplash\n{: .note}\n",[4440,110],{"slug":7886,"featured":6,"template":678},"autoscale-ci-runners","content:en-us:blog:autoscale-ci-runners.yml","Autoscale Ci Runners","en-us/blog/autoscale-ci-runners.yml","en-us/blog/autoscale-ci-runners",{"_path":7892,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7893,"content":7899,"config":7905,"_id":7907,"_type":16,"title":7908,"_source":17,"_file":7909,"_stem":7910,"_extension":20},"/en-us/blog/working-with-yaml-gitlab-ci-android",{"title":7894,"description":7895,"ogTitle":7894,"ogDescription":7895,"noIndex":6,"ogImage":7896,"ogUrl":7897,"ogSiteName":692,"ogType":693,"canonicalUrls":7897,"schema":7898},"Working with YAML in GitLab CI from the Android perspective","Guest author Renato Stanic shares a sample YAML configuration for Android projects, which helps his team with faster, more iterative development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665524/Blog/Hero%20Images/yaml-gitlab-ci-android.png","https://about.gitlab.com/blog/working-with-yaml-gitlab-ci-android","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Working with YAML in GitLab CI from the Android perspective\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Renato Stanic\"}],\n        \"datePublished\": \"2017-11-20\",\n      }",{"title":7894,"description":7895,"authors":7900,"heroImage":7896,"date":7902,"body":7903,"category":14,"tags":7904},[7901],"Renato Stanic","2017-11-20","\nUsing [continuous integration in our everyday workflow](/solutions/continuous-integration/) can help us a lot with faster and iterative development, and having CI do checks every time we change our codebase helps us with deal with fear of modifying code.\n\n\u003C!-- more -->\n\nDeploying app builds manually takes time and leaves us idle while we could be developing new and exciting features instead. Here at Undabot we are using GitLab CI for continuous integration. GitLab CI uses a YAML file for job configuration. In this blog post we will go through a sample YAML configuration for Android projects and describe the main YAML building blocks with common Android CI jobs.\n\n### YAML intro\nThe YAML file defines a set of jobs with constraints stating when they should be run. 
The jobs are defined as top-level elements with a name and always have to contain at least the `script` clause:\n\n```\nhelloworld_job:\n  script: \"echo Hello World!\"\n\nassemble_job:\n  script: \"./gradlew assembleRelease\"\n```\n\nYAML syntax allows for more complex job definitions than in the above example:\n\n```\nbefore_script:\n  - bundle install\n\nafter_script:\n  - rm secrets\n\nstages:\n  - build\n  - test\n  - deploy\n\nhelloworld_job:\n  stage: build\n  script:\n    - echo Hello World\n  only:\n    - master\n  tags:\n    - android\n```\n\n`before_script` – commands that run before each job’s script\n`after_script` – commands that run after each job’s script\n`stages` – used to define build stages\n`only` – defines the names of branches and tags for which the job will run\n`tags` – used to select specific Runners from the list of all Runners that are allowed to run this project.\n\n## Initial setup for Android\n\nThe first step is to create a YAML file called `.gitlab-ci.yml` in the root directory of your Android project and add the following code:\n\n```\nbefore_script:\n  - export ANDROID_HOME=\"$HOME/Library/Android/sdk\"\n  - bundle install\nstages:\n  - build\n  - test\n  - quality_assurance\n  - deploy\n```\n\nIn `before_script` we execute these two commands:\n`- export ANDROID_HOME=\"$HOME/Library/Android/sdk\"` – sets the Android home environment variable so it is available for all other jobs and Gradle tasks\n`- bundle install` – we are using fastlane for task automation and Bundler to manage Ruby gems, so we need to run bundle install to make sure everything is installed correctly.\n\nIn the `stages` section we define four build stages:\n`- build` – for build jobs\n`- test` – for test jobs that include unit and instrumentation tests\n`- quality_assurance` – for jobs that run all of our QA tools\n`- deploy` – for deployment jobs\n\n## Build stage\n\nThis job (`build_job`) is used to create an APK artifact that can be used to test the app manually or to upload it to the Play Store.\n\n```\nbuild_job:\n  stage: build\n  script:\n    - ./gradlew clean assembleRelease\n  artifacts:\n    paths:\n      - app/build/outputs/\n```\n\n`build_job:` – the name of the CI job\n`stage: build` – it gets executed in the build stage\n`./gradlew clean assembleRelease` – executes the Gradle command to create a release APK\n`artifacts:` – job section that defines the list of files and directories that are attached to the job after completion\n`paths:` – output file paths\n`app/build/outputs` – the directory path of our APK\n\n## Unit tests\n\nThis job (`unit_tests`) runs our unit tests in the test stage. Every time they fail, a report artifact will be created. 
Each report artifact expires within four days of creation.\n\n```\nunit_tests:\n  stage: test\n  script:\n    - ./gradlew test\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/tests/\n```\n\n`./gradlew test` – runs the Gradle command that triggers our unit tests\n`artifacts:`\n`name:` – defines the artifact name by using environment variables\n`CI_PROJECT_NAME` – the name of the project that is currently being built\n`CI_BUILD_REF_NAME` – the branch or tag name for which the project is built\n`when:` – defines when the artifact is created (`on_success`, `on_failure`, `always`)\n`expire_in:` – defines when the artifact expires; after it has expired, it gets deleted from CI\n\n## Instrumentation tests\n\nThis job (`instrumentation_tests`) runs all of our instrumentation tests in the test stage by starting a windowless emulator without sound and animations, followed by a [custom bash script](https://gist.github.com/anonymous/614aafb2d8710865c688684a8657a141) that waits for the emulator to start, after which the device is unlocked by sending key event 82. When the emulator is ready, we run the Gradle command for instrumentation tests. Once all tests have finished running, the emulator is killed with a [custom bash script](https://gist.github.com/anonymous/614aafb2d8710865c688684a8657a141).\n\n```\ninstrumentation_tests:\n  stage: test\n  script:\n    - emulator -avd testAVD -no-audio -no-window &\n    - ./ci/android-wait-for-emulator.sh\n    - adb devices\n    - adb shell settings put global window_animation_scale 0 &\n    - adb shell settings put global transition_animation_scale 0 &\n    - adb shell settings put global animator_duration_scale 0 &\n    - adb shell input keyevent 82 &\n    - ./gradlew connectedAndroidTest\n    - ./ci/stop-emulators.sh\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/androidTests/connected/\n```\n\n`- emulator -avd testAVD -no-audio -no-window &`\n`- ./ci/android-wait-for-emulator.sh`\nStarts the emulator and waits for it to boot.\n`- adb devices`\nDisplays the list of found devices in the GitLab web terminal.\n`- adb shell settings put global window_animation_scale 0 &`\n`- adb shell settings put global transition_animation_scale 0 &`\n`- adb shell settings put global animator_duration_scale 0 &`\nDisables all animations and transitions.\n\n## Static analysis\n\nThis job (`static_analysis`) runs all of the static code analysis in the QA stage. This is a tricky area, especially if you are working on a project with a lot of legacy code. My suggestion would be to disable all of the rules and start fixing them one at a time. The tools used for static analysis are lint, checkstyle, pmd and findbugs.\n\n```\nstatic_analysis:\n  stage: quality_assurance\n  script:\n    - ./gradlew lint\n    - ./gradlew checkstyle\n    - ./gradlew pmd\n    - ./gradlew findbugs\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/\n```\n\n`- ./gradlew lint`\n`- ./gradlew checkstyle`\n`- ./gradlew pmd`\n`- ./gradlew findbugs`\nThe Gradle commands that trigger the QA tools.\n`- app/build/reports` – the path to our QA reports\n\n## Deploy stage\n\nThe final job (`deploy_internal`) deploys the app to the QA team in the deploy stage. You don’t want to deploy every time you commit something, so this step is set as manual. 
Manual jobs are triggered via the GitLab web interface by pressing the play button in your pipeline list. If you are using fastlane as your deployment tool, the last job will look like the following code:\n\n```\ndeploy_internal:\n  stage: deploy\n  script:\n    - bundle exec fastlane android deploy_lane\n  when: manual\n```\n\n`- bundle exec fastlane android deploy_lane` – executes the fastlane deploy lane that deploys the app to the QA team\n`when: manual` – defines [when a job is executed](https://docs.gitlab.com/ee/ci/yaml/#when)\n\n## There’s plenty more\n\nSetting up Android continuous integration with GitLab CI is great, and it supports plenty of cool features, a lot more than we showed here. Hopefully this short introduction was helpful and will motivate you to discover more features on your own.\n\nComplete `.gitlab-ci.yml`:\n\n```\nbefore_script:\n  - export ANDROID_HOME=\"$HOME/Library/Android/sdk\"\n  - bundle install\n\nstages:\n- build\n- test\n- quality_assurance\n- deploy\n\nbuild_job:\n  stage: build\n  script:\n    - ./gradlew clean assembleRelease\n  artifacts:\n    paths:\n    - app/build/outputs/\n\nunit_tests:\n  stage: test\n  script:\n    - ./gradlew test\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/tests/\n\ninstrumentation_tests:\n  stage: test\n  script:\n    - emulator -avd testAVD -no-audio -no-window &\n    - ./ci/android-wait-for-emulator.sh\n    - adb devices\n    - adb shell settings put global window_animation_scale 0 &\n    - adb shell settings put global transition_animation_scale 0 &\n    - adb shell settings put global animator_duration_scale 0 &\n    - adb shell input keyevent 82 &\n    - ./gradlew connectedAndroidTest\n    - ./ci/stop-emulators.sh\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/androidTests/connected/\n\nstatic_analysis:\n  stage: quality_assurance\n  script:\n    - ./gradlew lint\n    - ./gradlew checkstyle\n    - ./gradlew pmd\n    - ./gradlew findbugs\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/\n\ndeploy_internal:\n  stage: deploy\n  script:\n    - bundle exec fastlane android deploy_lane\n  when: manual\n```\n\n_[Working with YAML in GitLab CI from an Android perspective](https://blog.undabot.com/working-with-yaml-in-gitlab-ci-from-android-perspective-b8cf54b5b911) was originally published on Undabot's blog._\n",[110,4440],{"slug":7906,"featured":6,"template":678},"working-with-yaml-gitlab-ci-android","content:en-us:blog:working-with-yaml-gitlab-ci-android.yml","Working With Yaml Gitlab Ci Android","en-us/blog/working-with-yaml-gitlab-ci-android.yml","en-us/blog/working-with-yaml-gitlab-ci-android",{"_path":7912,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7913,"content":7919,"config":7924,"_id":7926,"_type":16,"title":7927,"_source":17,"_file":7928,"_stem":7929,"_extension":20},"/en-us/blog/humangeo-switches-jenkins-gitlab-ci",{"title":7914,"description":7915,"ogTitle":7914,"ogDescription":7915,"noIndex":6,"ogImage":7916,"ogUrl":7917,"ogSiteName":692,"ogType":693,"canonicalUrls":7917,"schema":7918},"HumanGeo switched from Jenkins to GitLab and cut costs by 1/3","Management overhead was bogging down the team at HumanGeo. 
GitLab freed up more than just cash.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680315/Blog/Hero%20Images/humangeo-switches-jenkins-to-gitlab.jpg","https://about.gitlab.com/blog/humangeo-switches-jenkins-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"HumanGeo switched from Jenkins to GitLab and cut costs by 1/3\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2017-11-14\",\n      }",{"title":7914,"description":7915,"authors":7920,"heroImage":7916,"date":7921,"body":7922,"category":14,"tags":7923},[7015],"2017-11-14","\n\nAs a software development company, [HumanGeo](http://www.thehumangeo.com/) ships a lot of code. Specializing in geospatial visualization, they have clients in every sector from video game companies to government agencies. The ability to manage multiple projects, iterate quickly, and operate at scale is critical to their success. Over time, a robust DevOps practice has evolved to allow them to quicken their pace of innovation. But traditional tools in their stack, like Jenkins CI, haven’t been able to deliver.\n\n\u003C!-- more -->\n\nI recently caught up with [Justin Shelton](https://twitter.com/kwonstant), an engineer at HumanGeo, to talk about their expanded use of GitLab and how it’s improved both their workflow and budget. Here’s what he had to say:\n\n## Ease of use cuts admin time by 96%\n\n**William**: Can you tell me about the benefits you’ve seen from GitLab in terms of ease-of-use?\n\n**Justin**: Defining CI as code fits great with the \"Infrastructure as Code\" philosophy. We already push hard to have AWS environments expressed in CloudFormation templates, provisioning via Ansible, and so on. With GitLab CI, we can manage our CI pipeline the same way – with code.\n\nManaging a YAML Domain Specific Language (DSL) is way easier than managing Groovy for Jenkinsfiles (or most other config formats, for that matter). YAML is far more widespread and easy to understand, so more developers at junior and senior levels are exposed to it. The path to getting smart on writing GitLab CI DSL is much faster than coming up to speed on Groovy. While Jenkins is overwhelmingly customizable and familiar, it became Yet Another Thing to Manage™. In the end, GitLab CI shares a lot of the same (and in some cases more) configuration options.\n\nAs full stack engineers we do a lot of our own systems administration. Reducing our platform management burden is a huge plus. We used to spend 5-6 hours each month managing Jenkins and keeping it running. Now, I might spend 10-15 minutes a month managing GitLab CI.\n\n## Flexible CI runners cut costs 33%\n\n**William**: In [your blog post](http://blog.thehumangeo.com/gitlab-autoscale-runners.html) you shared that GitLab helped to cut infrastructure costs. How did that work in practice?\n\n**Justin**: The ability to integrate with handlers, like the Docker Machine interface I talk about in the post, is huge for helping to manage costs. We get resources when we need them, and can spin them down when we don't. That saves big money compared to maintaining a large instance and having to manage the JVM size and other factors whenever we run out of space. With Jenkins we used to run a dedicated m2.xlarge on AWS all the time for CI purposes. Now, with GitLab, we are able to run spot instances for only around 40 hours a week, resulting in about 1/3 cost savings. 
Engineers can change a few config items, and managers can see savings. Win!\n\n## Increasing the pace of innovation\n\n**William**: How else has GitLab adoption impacted your workflow?\n\n**Justin**: The speed of development is huge – new features get added every month, and I get genuinely excited to check out the release notes and update our instance every month. (Another perk is how simple this is: upgrading with two apt commands is as easy as it gets.)\n\nOf everything that's come out recently, [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) is the thing I'm most excited to dig into further. I'm excited about taking some of our bespoke release processes and tightening them up using this process. We're sticklers for code quality, so the Code Quality features were big, and we want to start utilizing Auto DevOps for canary releases as well.\n\n## Learn exactly how they did it\n\nAt HumanGeo, using Jenkins CI proved to be costly in both time and money. Switching to GitLab reduced administration overhead, lowered spend, and increased development velocity. Justin wrote up a post to share all the technical details on [how HumanGeo scaled GitLab CI runners](http://blog.thehumangeo.com/gitlab-autoscale-runners.html). Check it out and let us know what you think in the comments or on Twitter.\n\n\"[Pipe Dream](https://unsplash.com/photos/T7s_TnKO-dk)\" by [Sharosh Rajasekher](https://unsplash.com/@sharosh) on Unsplash\n{: .note}\n",[916,832,4440],{"slug":7925,"featured":6,"template":678},"humangeo-switches-jenkins-gitlab-ci","content:en-us:blog:humangeo-switches-jenkins-gitlab-ci.yml","Humangeo Switches Jenkins Gitlab Ci","en-us/blog/humangeo-switches-jenkins-gitlab-ci.yml","en-us/blog/humangeo-switches-jenkins-gitlab-ci",{"_path":7931,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7932,"content":7938,"config":7943,"_id":7945,"_type":16,"title":7946,"_source":17,"_file":7947,"_stem":7948,"_extension":20},"/en-us/blog/gitlab-vue-one-year-later",{"title":7933,"description":7934,"ogTitle":7933,"ogDescription":7934,"noIndex":6,"ogImage":7935,"ogUrl":7936,"ogSiteName":692,"ogType":693,"canonicalUrls":7936,"schema":7937},"How we do Vue: one year later","How we, at GitLab, write VueJS, one year later.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680321/Blog/Hero%20Images/vue-title.jpg","https://about.gitlab.com/blog/gitlab-vue-one-year-later","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we do Vue: one year later\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2017-11-09\",\n      }",{"title":7933,"description":7934,"authors":7939,"heroImage":7935,"date":7940,"body":7941,"category":14,"tags":7942},[6995],"2017-11-09","\n\nIt's been a while since [we wrote about Vue](/blog/why-we-chose-vue/). We've been using Vue for over a year now and life has been very good. Thanks [@lnoogn](https://twitter.com/lnoogn) for reminding me to write this article!\n\n\u003C!-- more -->\n\nOur situation reminds me of a quote about Scala from [\"Is Scala slowly dying?\"](https://www.reddit.com/r/scala/comments/2hw0bp/is_scala_slowly_dying/) Someone once said:\n\n> Scala people don't have time for redditing and blogging, they're busy getting crap done.\n\nWhich is exactly what we've been doing. Like Scala, Vue works really, really well, when used properly. It turns out Vue isn't a buzzword, Vue is a workhorse. 
A lot of our problems have been solved, by us and others. We still have problems, but we now have a reproducible \"way to write Vue.\" We don't adopt every new idea out there, but we have changed a few things since we last spoke.\n\nSince that last post, we published a [very extensive Vue style guide](https://docs.gitlab.com/ee/development/fe_guide/vue.html), after which Vue also put out a [style guide](https://vuejs.org/v2/style-guide/), [taking inspiration from ours](https://github.com/vuejs/eslint-plugin-vue/issues/77#issuecomment-315834845). The style guide has been updated several times as we discover better ways to write Vue. Here are some of the things we discovered.\n\n## Just use VueX\n\nWe discovered that [VueX](https://vuex.vuejs.org/) makes our lives easier. If you are writing a medium to large feature, use VueX. If it's a tiny feature, you might get away without it. We made the mistake of not using VueX for a large feature. We wrote a [multi-file editor](https://gitlab.com/gitlab-org/gitlab-ce/issues/31890) (WIP) to replace our current repo file view, to allow easy editing of multiple files.\n\n![multi-file-editor.png](https://about.gitlab.com/images/vue_2017/multi-file-editor.png){: .shadow}\n\nIn the beginning we did not use VueX for this feature and instead used the store pattern. The Vue docs talk about the [store pattern](https://vuejs.org/v2/guide/state-management.html#Simple-State-Management-from-Scratch), which works well when you are committed to strictly keeping to the pattern. We've found that you are better off spending your time with VueX instead. While VueX is initially more verbose, it is much more scalable, and will save you tons of time in the long run. Our mistake happened when we changed the data in multiple places. In VueX you are forced to change the data in one central place. If you don't do this, you will wind up chasing unexpected bugs around.\n\n## Write high quality code\n\nEven though VueJS and VueX are both wonderful, it is still possible (as with any code) to write bad Vue code. While the code may work, your longevity and scalability may suffer. Performance can suffer. Vue makes it so easy to have what seems like working, perfect code, because Vue is so simple to write. Longevity problems can mean that your code initially works, but you (and others) will have a hard time trying to update the code. Performance problems might not crop up with small data sets, but will with larger ones. Code can get messy. Your code can get smelly. Yes, even with Vue, you can have [code smell](https://en.wikipedia.org/wiki/Code_smell).\n\nWhen you add something to the `data` object or the `store` for Vue to keep track of, Vue will recursively walk down your data object and keep track of everything. If your data is super hierarchical and just large in general, and you are changing things often (like maybe on `mousemove`), then you can create jank. It's not bad to have Vue observe large data sets, but just confirm that you do in fact need the data you are watching to be reactive. It's easy with Vue to just make everything reactive, when it might not need to be.\n\nThat's why we are very strict when anyone writes Vue code. They must [follow our documentation](https://docs.gitlab.com/ee/development/fe_guide/vue.html). 
They must also only write Vue when it is necessary and not write it [when it is overkill](https://docs.gitlab.com/ee/development/fe_guide/vue.html#when-not-to-use-vue-js).\n\nAll of our new Vue code follows the [Flux architecture](https://facebook.github.io/flux/). VueX also follows Flux, which is part of the reason we use VueX. You can use the previously mentioned \"store pattern,\" but VueX is a better choice because it enforces all of the rules. If you go rogue, you will wind up enforcing the rules yourself, and you will probably make mistakes. The less you put on your plate, the better. A good example of a well-written Vue app is the [registry image list](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/14303).\n\n### I want to use jQuery with Vue\n\nDuring new development, this question kept popping up.\n> Is it ever OK to mix jQuery with VueJS?\n\nWe are not talking about using [Select2](https://select2.org/), which is a jQuery library. We are talking about the need to query the DOM. We had discussions about using jQuery and the following was proposed:\n\n> Using jQuery is OK, but only for querying.\n\nAt first I had several discussions about using jQuery with Vue. Some had said it might be OK, but only in read-only (querying) situations. However, after doing the research, we found that it is **not** a good idea to use jQuery with Vue. There will always be a better solution. We found that if you ever find yourself needing to query the DOM within a Vue architecture, then you are doing something wrong.\n\nIf one were to hypothetically use jQuery for only the tiniest querying situations, one would have to quantify those situations. You should instead swear off querying the DOM when in Vue.\n\nInstead of querying, you will find that using the `store` in combination with the server-side code is usually a much simpler answer. The server can provide validity to your data that you cannot provide on the client side. For the most part, we find that the less we have to fool with the data on the client side, the better. That's not to say it's never OK to modify the data on the client side, but that it isn't usually the cleanest solution. At GitLab we use querying only to grab endpoints from the `data` attribute of our main element, but we don't use jQuery; we use `el.dataset`. At GitLab, we (the Frontend people) talk with the Backend people to ensure the structure of the data we will be consuming. In that way, both the Frontend team and the Backend team can be in control.\n\n#### Example situation:\n\nCheck out this issue:\n\n![issue](https://about.gitlab.com/images/vue_2017/issue.png){: .shadow}\n\nWe now render all issue comments in Vue. An example of a situation where we wanted to use jQuery was during the rewrite of the edit-the-last-user-comment feature. When someone presses the `up` key on their keyboard from an empty new comment `textarea` (at the very bottom of the page), we allow them to edit the last comment they created, just like in Slack. Not just the last comment, but the last comment *they created*. We marked the last user comment in the picture in red. Of course there is a time crunch. Then someone might say,\n\n> Can't we just do a quick solution here and fix it later?\n\nSurely you *could* query the DOM for this. A better solution, in this case, is to let the backend developers mark the last user comment in the JSON they return. Backend developers have direct access to the database, which means they may be able to optimize the code. 
Then no client-side work has to be done at all, in this case. Someone has to do the work to mark the last user comment. In this case the solution is just finding the right person for the job. Once you have that data from the server, the comment is in your `store`, ready for your easy access. You can do anything now. The world is your oyster.\n\nIf you find yourself querying the DOM, \"just this one time\" 😉, there is always a better solution.\n\n### The proper Vue app\n\nEvery Vue bundle needs one store, one service, and always has one entry point. Your entry point component is the only container component and every other component is presentational. All this information is in our Vue docs.\n\nYou can start out with a single `div`.\n\n```html\n\u003C!--HAML-->\n.js-vue-app{ data: { endpoint: 'foo' }}\n\n\u003C!--HTML-->\n\u003Cdiv class=\"js-vue-app\" data-endpoint=\"foo\">\u003C/div>\n```\nYou can pass your endpoints in through the data attributes. Vue can then call these endpoints with an HTTP client of your choice.\n\nYou don't want to do any URL building in client-side JavaScript. Make sure you pass in all your server-built URLs through endpoints. When writing Vue it's important to let the server do what it should.\n\n## Improve performance\n\nWe recently rewrote our issue comments in Vue. The issue comments were previously written in Haml, jQuery, and Rails. We had a bottleneck because we were not loading the comments asynchronously. A quick solution is to load comments via ajax and populate comments after the page loads. One way to make a page load faster is to not block the page with heavy items and load them after.\n\n![comments.png](https://about.gitlab.com/images/vue_2017/comments.png){: .shadow}\n\nWhat we love is that one day we turned on the new comments and some people didn't know that we had refactored it. As a result of the refactor our issue pages load much faster, and there is less jank.\n\nLoading the comments on the issue page is now streamlined, and individual issues load much faster. In the past, an issue page could have tens of thousands of event listeners. Our previous code was not properly removing and keeping track of event listeners. Those massive event listeners (along with other problems) created jank, so scrolling the page was choppy with many comments. We removed jQuery and added in Vue and focused on improving the performance. You can clearly see and feel that the page is much faster. However, our work to improve the performance has just begun. This rewrite sets the foundation for performance improvements that are easier to write, because the code is much more maintainable. Previously the code was hard to maintain. Now the issue comments code is properly separated and \"componentized.\"\n\nWith these new improvements, as well as other parallel improvements, e.g. loading images on scroll, we were able to make the page load and perform faster.\n\n![speed.png](https://about.gitlab.com/images/vue_2017/speed.png){: .shadow}\n\nRefactoring is that word that a new, super-green developer mentions on day one when they suggest rewriting everything in Angular. That hasn't happened at GitLab. Our frontend devs tend to be very conservative, which is a very good thing. Which begs the question, why does it seem like [everyone is always refactoring](https://reasonml.github.io/community/blog/#reason-3)? What are they trying to achieve? I can only speak for GitLab. What do we want to achieve with a refactor? In reality it's going to cost a lot of money. 
The costs are:\n\n1. Cost of doing the refactoring.\n1. Cost of testing the change.\n1. Cost of updating tests and documentation.\n\nYou also have more risk:\n\n1. Risk of introducing bugs.\n1. Risk of taking on a huge task that you can't finish.\n1. Risk of not achieving the quality/improvements you intended.\n\nOur goals are:\n\n**Goal #1**: Make the code more maintainable. We want to make the process of adding new features easier. In the long term this refactor will save us time, but it takes a significant amount of time to recoup the time spent refactoring. The hard truth may be that a refactor usually does not save you time, but can save you stress.\n\n**Goal #2**: What it can do, if done right, is make developers happy. Nothing gives your team more horsepower than a happy, excited coder. A stressed-out coder will want to stop coding; an excited coder will not want to stop. A happy coder saves the most time.\n\nTo meet our goal our next step is to refactor the merge request comments section. Our merge request comments are massively slow for merge requests with lots of comments. The comments become slower and start to be less responsive at around 200 comments. The diffs are slow as well. There are a ton of reasons for this, one of which is that JavaScript is causing multiple reflows that take tons of time. We could refactor this and have already put in a fix, but this isn't a long-term solution.  In the case of a huge MR, there was code that was causing a reflow that [takes over eight seconds](https://gitlab.com/gitlab-org/gitlab-ce/issues/39332)! This is now fixed. In this [image](https://gitlab.com/gitlab-org/gitlab-ce/uploads/e18856a1544d4d0e6420d11fd0479af7/ss__2017-10-20_at_1.41.04_PM.png)  you can see there is other stuff slowing things down. Clearly there is a lot of work to do here. Our biggest problem is that the code is not maintainable, which means that fixes take longer. A refactor into Vue will provide some great initial speed improvements, and lay the groundwork for easier improvements in the future.\n\nThere is so much work to do at GitLab. 
If you want to be a part of exploring the massive catacombs of GitLab and writing awesome code and if you are interested in helping out our Frontend team, then [apply](https://handbook.gitlab.com/job-families/engineering/development/frontend/).\n",[1979,915],{"slug":7944,"featured":6,"template":678},"gitlab-vue-one-year-later","content:en-us:blog:gitlab-vue-one-year-later.yml","Gitlab Vue One Year Later","en-us/blog/gitlab-vue-one-year-later.yml","en-us/blog/gitlab-vue-one-year-later",{"_path":7950,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7951,"content":7957,"config":7963,"_id":7965,"_type":16,"title":7966,"_source":17,"_file":7967,"_stem":7968,"_extension":20},"/en-us/blog/automating-boring-git-operations-gitlab-ci",{"title":7952,"description":7953,"ogTitle":7952,"ogDescription":7953,"noIndex":6,"ogImage":7954,"ogUrl":7955,"ogSiteName":692,"ogType":693,"canonicalUrls":7955,"schema":7956},"GitBot – automating boring Git operations with CI","Guest author Kristian Larsson shares how he automates some common Git operations, like rebase, using GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672374/Blog/Hero%20Images/gitbot-automate-git-operations.jpg","https://about.gitlab.com/blog/automating-boring-git-operations-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitBot – automating boring Git operations with CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kristian Larsson\"}],\n        \"datePublished\": \"2017-11-02\",\n      }",{"title":7952,"description":7953,"authors":7958,"heroImage":7954,"date":7960,"body":7961,"category":14,"tags":7962},[7959],"Kristian Larsson","2017-11-02","\n\nGit is super useful for anyone doing a bit of development work or just trying to\nkeep track of a bunch of text files. However, as your project grows you might\nfind yourself doing lots of boring repetitive work just around Git itself. At\nleast that’s what happened to me and so I automated some boring Git stuff using our\n[continuous integration (CI) system](/solutions/continuous-integration/).\n\n\u003C!-- more -->\n\nThere are probably all sorts of use cases for automating various Git operations\nbut I’ll talk about a few that I’ve encountered. We’re using GitLab and [GitLab\nCI](/solutions/continuous-integration/) so that’s what my examples\nwill include, but most of the concepts should apply to other systems as well.\n\n## Automatic rebase\n\nWe have some Git repos with source code that we receive from vendors, who we can think\nof as our `upstream`. We don’t actually share a Git repo with the vendor but\nrather we get a tar ball every now and then. The tar ball is extracted into a\nGit repository, on the `master` branch which thus tracks the software as it is\nreceived from upstream. In a perfect world the software we receive would be\nfeature complete and bug free and so we would be done, but that’s usually not\nthe case. We do find bugs and if they are blocking we might decide to implement\na patch to fix them ourselves. The same is true for new features where we might\nnot want to wait for the vendor to implement it.\n\nThe result is that we have some local patches to apply. We commit such patches\nto a separate branch, commonly named `ts` (for TeraStream), to keep them\nseparate from the official software. 
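In git terms that layout is nothing exotic; a sketch (branch name as above,\nthe commit message is made up):\n\n```\ngit checkout master          # pristine vendor code lives here\ngit checkout -b ts           # our local patches go on this branch\ngit commit -am \"Fix crash in vendor parser\"\n```\n\n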
Whenever a new software version is released,\nwe extract its content to `master` and then rebase our `ts` branch onto `master`\nso we get all the new official features together with our patches. Once we’ve\nimplemented something we usually send it upstream to the vendor for inclusion.\nSometimes they include our patches verbatim so that the next version of the code\nwill include our exact patch, in which case a rebase will simply skip our patch.\nOther times there are slight or major (it might be a completely different design)\nchanges to the patch, and then someone typically needs to sort out the patches\nmanually. Mostly though, rebasing works just fine and we don’t end up with conflicts.\n\nNow, this whole rebasing process gets a tad boring and repetitive after a while,\nespecially considering we have a dozen repositories with the setup described\nabove. What I recently did was to automate this using our CI system.\n\nThe workflow thus looks like:\n\n- human extracts zip file, git add + git commit on master + git push\n- CI runs for `master` branch\n   - clones a copy of itself into a new working directory\n   - checks out `ts` branch (the one with our patches) in working directory\n   - rebases `ts` onto `master`\n   - push `ts` back to `origin`\n- this event will now trigger a CI build for the `ts` branch\n- when CI runs for the `ts` branch, it will compile, test and save the binary output as “build artifacts”, which can be included in other repositories\n- GitLab CI, which is what we use, has a CI_PIPELINE_ID that we use to version built container images or artifacts\n\nTo do this, all you need is a few lines in a .gitlab-ci.yml file, essentially:\n\n```\nstages:\n  - build\n  - git-robot\n\n... build jobs ...\n\ngit-rebase-ts:\n  stage: git-robot\n  only:\n    - master\n  allow_failure: true\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - ssh-add \u003C(echo \"$GIT_SSH_PRIV_KEY\")\n    - git config --global user.email \"kll@dev.terastrm.net\"\n    - git config --global user.name \"Mr. Robot\"\n    - mkdir -p ~/.ssh\n    - cat gitlab-known-hosts >> ~/.ssh/known_hosts\n  script:\n    - git clone git@gitlab.dev.terastrm.net:${CI_PROJECT_PATH}.git\n    - cd ${CI_PROJECT_NAME}\n    - git checkout ts\n    - git rebase master\n    - git push --force origin ts\n```\n\nWe’ll go through the YAML file a few lines at a time. Some basic knowledge about GitLab CI is assumed.\n\nThis first part lists the stages of our pipeline.\n\n```\nstages:\n  - build\n  - git-robot\n```\n\nWe have two stages: first the `build` stage, which does whatever you want it to\ndo (ours compiles stuff, runs a few unit tests and packages it all up), then the\n`git-robot` stage, which is where we perform the rebase.\n\nThen there’s:\n\n```\ngit-rebase-ts:\n  stage: git-robot\n  only:\n    - master\n  allow_failure: true\n```\n\nWe define the stage in which we run, followed by the `only` statement, which limits\nCI jobs to run only on the specified branch(es), in this case `master`.\n\n`allow_failure` simply allows the CI job to fail but still pass the pipeline.\n\nSince we are going to clone a copy of ourselves (the repository checked out in\nCI) we need SSH and SSH keys set up. We’ll use ssh-agent with a password-less key\nto authenticate. 
Generate a key using ssh-keygen, for example:\n\n```\nkll@machine ~ $ ssh-keygen -f foo\nGenerating public/private rsa key pair.\nEnter passphrase (empty for no passphrase):\nEnter same passphrase again:\nYour identification has been saved in foo.\nYour public key has been saved in foo.pub.\nThe key fingerprint is:\nSHA256:6s15MZJ1/kUsDU/PF2WwRGA963m6ZSwHvEJJdsRzmaA kll@machine\nThe key's randomart image is:\n+---[RSA 2048]----+\n|            o**.*|\n|           ..o**o|\n|           Eo o%o|\n|          .o.+o O|\n|        So oo.o+.|\n|       .o o.. o+o|\n|      .  . o..o+=|\n|     . o ..  .o= |\n|      . +.    .. |\n+----[SHA256]-----+\nkll@machine ~ $\n```\n\nAdd the public key as a deploy key under Project Settings\n\u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i> Repository \u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i>\nDeploy Keys. Make sure you enable write access or you won’t be able to have your\nGit robot push commits. We then need to hand over the private key so that it can\nbe accessed from within the CI job. We’ll use a secret environment variable for\nthat, which you can define under Project Settings\n\u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i> Pipelines \u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i>\nEnvironment variables. I’ll use the environment variable GIT_SSH_PRIV_KEY for this.\n\nThe next part is the `before_script`:\n\n```\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - ssh-add \u003C(echo \"$GIT_SSH_PRIV_KEY\")\n    - git config --global user.email \"kll@dev.terastrm.net\"\n    - git config --global user.name \"Mr. Robot\"\n    - mkdir -p ~/.ssh\n    - cat gitlab-known-hosts >> ~/.ssh/known_hosts\n```\n\nFirst ssh-agent is installed if it isn’t already. We then start up ssh-agent and\nadd the key stored in the environment variable GIT_SSH_PRIV_KEY (which we set up\npreviously). The Git user information is set and we finally create .ssh and add\nthe known host information about our GitLab server to our known_hosts file. You\ncan generate the gitlab-known-hosts file using the following command:\n\n```\nssh-keyscan my-gitlab-machine >> gitlab-known-hosts\n```\n\nAs the name implies, the `before_script` is run before the main `script` part, and\nthe ssh-agent we started in the `before_script` will also continue to run for the\nduration of the job. The ssh-agent information is stored in some environment\nvariables which are carried across from the `before_script` into the main script,\nenabling it to work. It’s also possible to put this SSH setup in the main script;\nI just thought it looked cleaner splitting it up between `before_script` and `script`.\nNote, however, that `after_script` behaves differently: while it’s\npossible to pass environment vars from `before_script` to `script`, they do not\nappear to be passed to `after_script`. Thus, if you want to do Git magic in the\n`after_script` you also need to perform the SSH setup in the `after_script`.\n\nThis brings us to the main script. 
In GitLab CI we already have a checked-out\nclone of our project, but it was checked out automatically by the CI system\nthrough the use of magic (it actually happens in a container prior to the one\nwe are operating in, one that has some special credentials), so we can’t really\nuse it. Besides, checking out other branches in it would be really weird, as it\nwould disrupt the very code we are running to do all of this. It’s all rather meta.\n\nAnyway, we’ll be checking out a new Git repository where we’ll do our work, then\nchange the current directory to the newly checked-out repository, after which\nwe’ll check out the `ts` branch, do the rebase and push it back to the origin remote.\n\n```\n    - git clone git@gitlab.dev.terastrm.net:${CI_PROJECT_PATH}.git\n    - cd ${CI_PROJECT_NAME}\n    - git checkout ts\n    - git rebase master\n    - git push --force origin ts\n```\n\n… and that’s it. We’ve now automated the rebasing of a branch in our config file. Occasionally it\nwill fail due to problems rebasing (most commonly merge conflicts) but then you\ncan just step in and do the above steps manually and be interactively prompted\non how to handle conflicts.\n\n## Automatic merge requests\n\nAll the repositories I mentioned in the previous section are NEDs, a form of\ndriver for how to communicate with a certain type of device, for Cisco NSO (a\nnetwork orchestration system). We package up Cisco NSO, together with these NEDs\nand our own service code, in a container image. The build of that image is\nperformed in CI and we use a repository called `nso-ts` to control that work.\n\nThe NEDs are compiled in CI from their own repository and the binaries are saved\nas build artifacts. Those artifacts can then be pulled in the CI build of `nso-ts`.\nThe reference to which artifact to include is the name of the NED as well as the\nbuild version. The version number of the NED is nothing more than the pipeline\nID (which you’ll access in CI as ${CI_PIPELINE_ID}) and by including a specific\nversion of the NED, rather than just using “latest”, we gain a much more consistent\nand reproducible build.\n\nWhenever a NED is updated a new build is run that produces new binary artifacts.\nWe probably want to use the new version, but not before we test it out in CI. The\nactual versions of the NEDs to use are stored in a file in the `nso-ts` repository,\nwhich follows a simple format, like this:\n\n```\nned-iosxr-yang=1234\nned-junos-yang=4567\n...\n```\n\nUpdating the version to use is thus a simple job: rewrite this text file,\nreplacing the version number with a given CI_PIPELINE_ID. Again,\nwhile NED updates are less frequent than updates to `nso-ts`, they do occur, and\nhandling them is bloody boring. Enter automation!\n\n```\ngit-open-mr:\n  image: gitlab.dev.terastrm.net:4567/terastream/cisco-nso/ci-cisco-nso:4.2.3\n  stage: git-robot\n  only:\n    - ts\n  tags:\n    - no-docker\n  allow_failure: true\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - ssh-add \u003C(echo \"$GIT_SSH_PRIV_KEY\")\n    - git config --global user.email \"kll@dev.terastrm.net\"\n    - git config --global user.name \"Mr. 
Robot\"\n    - mkdir -p ~/.ssh\n    - cat gitlab-known-hosts >> ~/.ssh/known_hosts\n  script:\n    - git clone git@gitlab.dev.terastrm.net:TeraStream/nso-ts.git\n    - cd nso-ts\n    - git checkout -b robot-update-${CI_PROJECT_NAME}-${CI_PIPELINE_ID}\n    - for LIST_FILE in $(ls ../ned-package-list.* | xargs -n1 basename); do NED_BUILD=$(cat ../${LIST_FILE}); sed -i packages/${LIST_FILE} -e \"s/^${CI_PROJECT_NAME}.*/${CI_PROJECT_NAME}=${NED_BUILD}/\"; done\n    - git diff\n    - git commit -a -m \"Use ${CI_PROJECT_NAME} artifacts from pipeline ${CI_PIPELINE_ID}\"\n    - git push origin robot-update-${CI_PROJECT_NAME}-${CI_PIPELINE_ID}\n    - HOST=${CI_PROJECT_URL} CI_COMMIT_REF_NAME=robot-update-${CI_PROJECT_NAME}-${CI_PIPELINE_ID} CI_PROJECT_NAME=TeraStream/nso-ts GITLAB_USER_ID=${GITLAB_USER_ID} PRIVATE_TOKEN=${PRIVATE_TOKEN} ../open-mr.sh\n```\n\nSo this time around we check out a Git repository into a separate working\ndirectory again, it’s just that it’s not the same Git repository as we are\nrunning on simply because we are trying to do changes to a repository that is\nusing the output of the repository we are running on. It doesn’t make much of a\ndifference in terms of our process. At the end, once we’ve modified the files we\nare interested in, we also open up a merge request on the target repository.\nHere we can see the MR (which is merged already) to use a new version of the\nNED `ned-snabbaftr-yang`.\n\n\u003Cimg src=\"/images/blogimages/gitbot-ned-update-mr.png\" alt=\"MR using new version of NED\" style=\"width: 700px;\"/>{: .shadow}\n\nWhat we end up with is that whenever there is a new version of a NED, a single merge\nrequest is opened on our `nso-ts` repository to start using the new NED. That\nmerge request is using changes on a new branch and CI will obviously run for\n`nso-ts` on this new branch, which will then test all of our code using the new\nversion of the NED. We get a form of version pinning, with the form of explicit\nchanges that it entails, yet it’s a rather convenient and non-cumbersome\nenvironment to work with thanks to all the automation.\n\n## Getting fancy\n\nWhile automatically opening an MR is sweet… we can do ~~better~~fancier. Our `nso-ts`\nrepository is based on Cisco NSO (Tail-F NCS), or actually the `nso-ts` Docker\nimage is based on a `cisco-nso` Docker image that we build in a separate\nrepository. We put the version of NSO as the tag of the `cisco-nso` Docker\nimage, so `cisco-nso:4.2.3` means Cisco NSO 4.2.3. This is what the `nso-ts`\nDockerfile will use in its `FROM` line.\n\nUpgrading to a new version of NCS is thus just a matter of rewriting the tag…\nbut what version of NCS should we use? There’s 4.2.4, 4.3.3, 4.4.2 and 4.4.3\navailable and I’m sure there’s some other version that will pop up its evil\nhead soon enough. How do I know which version to pick? And will our current code\nwork with the new version?\n\nTo help myself in the choice of NCS version I implemented a script that gets the\nREADME file of a new NCS version and cross references the list of fixed issues\nwith the issues that we currently have open in the Tail-F issue tracker. The\noutput of this is included in the merge request description so when I look at\nthe merge request I immediately know what bugs are fixed or new features are\nimplemented by moving to a specific version. Having this automatically generated\nfor us is… well, it’s just damn convenient. 
Together with actually testing our\ncode against the new version of NCS, this gives us confidence that an upgrade will be smooth.\n\nHere are the merge requests currently opened by our GitBot:\n\n\u003Cimg src=\"/images/blogimages/automate-git-merge-requests.png\" alt=\"Merge requests automated by Git bot\" style=\"width: 700px;\"/>{: .shadow}\n\nWe can see how the system has generated MRs to move to all the different\nversions of NSO currently available. As we are currently on NSO v4.2.3 there’s\nno underlying branch for that one, which leads to an errored build. For the other\nversions though, there is a branch per version that executes the CI pipeline to\nmake sure all our code runs with this version of NSO.\n\nAs there have been a few commits today, these branches are behind by six commits,\nbut they will be rebased tonight so we get an up-to-date picture of whether they\nwork with our latest code.\n\n\u003Cimg src=\"/images/blogimages/automate-git-commits.png\" alt=\"Commits\" style=\"width: 700px;\"/>{: .shadow}\n\nIf we go back and look at one of these merge requests, we can see how the\ndescription includes information about which of the issues we currently have open\nwith Cisco / Tail-F would be solved by moving to this version.\n\n\u003Cimg src=\"/images/blogimages/automate-git-mr-description.png\" alt=\"Merge request descriptions\" style=\"width: 700px;\"/>{: .shadow}\n\nThis is from v4.2.4, and as we are currently on v4.2.3 we can see that there are\nonly a few fixed issues.\n\nIf we instead look at v4.4.3 we can see that the list is significantly longer.\n\n\u003Cimg src=\"/images/blogimages/automate-git-mr-description-list.png\" alt=\"Merge request descriptions\" style=\"width: 700px;\"/>{: .shadow}\n\nPretty sweet, huh? :)\n\nAs this involves a bit more code I’ve put the relevant files in a [GitHub gist](https://gist.github.com/plajjan/42592665afd5ae045ee36220e19919aa).\n\n## This is the end\n\nIf you are reading this, chances are you already have your reasons for why you\nwant to automate some Git operations. Hopefully I’ve provided some inspiration\nfor how to do it.\n\nIf not, or if you just want to discuss the topic in general or have more specific\nquestions about our setup, please do reach out to me on [Twitter](https://twitter.com/plajjan).\n\n_[This post](http://plajjan.github.io/automating-git/) was originally published on [plajjan.github.io](http://plajjan.github.io/)._\n\n## About the Guest Author\n\nKristian Larsson is a network automation systems architect at Deutsche Telekom.\nHe is working on automating virtually all aspects of running TeraStream, the\ndesign for Deutsche Telekom's next-generation fixed network, using robust and\nfault-tolerant software. He is active in the IETF as well as being a\nrepresenting member of OpenConfig. 
Prior to joining Deutsche Telekom,\nKristian was the IP & opto network architect for Tele2's international backbone\nnetwork.\n\n\"[BB-8 in action](https://unsplash.com/photos/C8VWyZhcIIU)\" by [Joseph Chan](https://unsplash.com/@yulokchan) on Unsplash\n{: .note}\n",[110,4440,702],{"slug":7964,"featured":6,"template":678},"automating-boring-git-operations-gitlab-ci","content:en-us:blog:automating-boring-git-operations-gitlab-ci.yml","Automating Boring Git Operations Gitlab Ci","en-us/blog/automating-boring-git-operations-gitlab-ci.yml","en-us/blog/automating-boring-git-operations-gitlab-ci",{"_path":7970,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7971,"content":7976,"config":7981,"_id":7983,"_type":16,"title":7984,"_source":17,"_file":7985,"_stem":7986,"_extension":20},"/en-us/blog/scaling-the-gitlab-database",{"title":7972,"description":7973,"ogTitle":7972,"ogDescription":7973,"noIndex":6,"ogImage":5904,"ogUrl":7974,"ogSiteName":692,"ogType":693,"canonicalUrls":7974,"schema":7975},"Scaling the GitLab database","An in-depth look at the challenges faced when scaling the GitLab database and the solutions we applied to help solve the problems with our database setup.","https://about.gitlab.com/blog/scaling-the-gitlab-database","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Scaling the GitLab database\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yorick Peterse\"}],\n        \"datePublished\": \"2017-10-02\",\n      }",{"title":7972,"description":7973,"authors":7977,"heroImage":5904,"date":7978,"body":7979,"category":14,"tags":7980},[6025],"2017-10-02","\nFor a long time GitLab.com used a single PostgreSQL database server and a single\nreplica for disaster recovery purposes. This worked reasonably well for the\nfirst few years of GitLab.com's existence, but over time we began seeing more and\nmore problems with this setup. In this article we'll take a look at what we did\nto help solve these problems for both GitLab.com and self-managed GitLab\ninstances.\n\n\u003C!-- more -->\n\nFor example, the database was under constant pressure, with CPU utilization\nhovering around 70 percent almost all the time. This was not because we used all\navailable resources in the best way possible, but because we were bombarding the\nserver with too many (badly optimized) queries. We realized we needed a better\nsetup that would allow us to balance the load and make GitLab.com more resilient\nto any problems that may occur on the primary database server.\n\nWhen tackling these problems using PostgreSQL there are essentially four\ntechniques you can apply:\n\n1. Optimize your application code so the queries are more efficient (and\n   ideally use fewer resources).\n2. Use a connection pooler to reduce the number of\n   database connections (and associated resources) necessary.\n3. Balance the load across multiple database servers.\n4. Shard your database.\n\nOptimizing the application code is something we have been working on actively\nfor the past two years, but it's not a final solution. Even if you improve\nperformance, when traffic also increases you may still need to apply the other\ntechniques. For the sake of this article we'll skip over this particular\nsubject and instead focus on the other techniques.\n\n## Connection pooling\n\nIn PostgreSQL a connection is handled by starting an OS process which in turn\nneeds a number of resources. 
The more connections (and thus processes), the more\nresources your database will use. PostgreSQL also enforces a maximum number of\nconnections as defined in the [max_connections][max-connections] setting. Once\nyou hit this limit PostgreSQL will reject new connections. Such a setup can be\nillustrated using the following diagram:\n\n{: .text-center}\n![PostgreSQL Diagram](https://about.gitlab.com/images/scaling-the-gitlab-database/postgresql.svg)\n\nHere our clients connect directly to PostgreSQL, thus requiring one connection\nper client.\n\nBy pooling connections we can have multiple client-side connections reuse\nPostgreSQL connections. For example, without pooling we'd need 100 PostgreSQL\nconnections to handle 100 client connections; with connection pooling we may\nonly need 10 or so PostgreSQL connections depending on our configuration. This\nmeans our connection diagram will instead look something like the following:\n\n{: .text-center}\n![Connection Pooling Diagram](https://about.gitlab.com/images/scaling-the-gitlab-database/pooler.svg)\n\nHere we show an example where four clients connect to pgbouncer but instead of\nusing four PostgreSQL connections we only need two of them.\n\nFor PostgreSQL there are two connection poolers that are most commonly used:\n\n* [pgbouncer][pgbouncer]\n* [pgpool-II][pgpool]\n\npgpool is a bit special because it does much more than just connection pooling:\nit has a built-in query caching mechanism, can balance load across multiple\ndatabases, manage replication, and more.\n\nOn the other hand pgbouncer is much simpler: all it does is connection pooling.\n\n## Database load balancing\n\nLoad balancing on the database level is typically done by making use of\nPostgreSQL's \"[hot standby][hot-standby]\" feature. A hot-standby is a PostgreSQL\nreplica that allows you to run read-only SQL queries, contrary to a regular\nstandby that does not allow any SQL queries to be executed. To balance load\nyou'd set up one or more hot-standby servers and somehow balance read-only\nqueries across these hosts while sending all other operations to the primary.\nScaling such a setup is fairly easy: simply add more hot-standby servers (if\nnecessary) as your read-only traffic increases.\n\nAnother benefit of this approach is having a more resilient database cluster.\nWeb requests that only use a secondary can continue to operate even if the\nprimary server is experiencing issues; though of course you may still run into\nerrors should those requests end up using the primary.\n\nThis approach however can be quite difficult to implement. For example, explicit\ntransactions must be executed on the primary since they may contain writes.\nFurthermore, after a write we want to continue using the primary for a little\nwhile because the changes may not yet be available on the hot-standby servers\nwhen using asynchronous replication.\n\n## Sharding\n\nSharding is the act of horizontally partitioning your data. This means that data\nresides on specific servers and is retrieved using a shard key. For example, you\nmay partition data per project and use the project ID as the shard key. Sharding\na database is interesting when you have a very high write load (as there's no\nother easy way of balancing writes other than perhaps a multi-master setup), or\nwhen you have _a lot_ of data and you can no longer store it in a conventional\nmanner (e.g. 
you simply can't fit it all on a single disk).\n\nUnfortunately the process of setting up a sharded database is a massive\nundertaking, even when using software such as [Citus][citus]. Not only do you\nneed to set up the infrastructure (which varies in complexity depending on\nwhether you run it yourself or use a hosted solution), but you also need to\nadjust large portions of your application to support sharding.\n\n### Cases against sharding\n\nOn GitLab.com the write load is typically very low, with most of the database\nqueries being read-only queries. In very exceptional cases we may spike to 1500\ntuple writes per second, but most of the time we barely make it past 200 tuple\nwrites per second. On the other hand we can easily read up to 10 million tuples\nper second on any given secondary.\n\nStorage-wise, we also don't use that much data: only about 800 GB. A large\nportion of this data is data that is being migrated in the background. Once\nthose migrations are done we expect our database to shrink in size quite a bit.\n\nThen there's the amount of work required to adjust the application so all\nqueries use the right shard keys. While quite a few of our queries usually\ninclude a project ID which we could use as a shard key, there are also many\nqueries where this isn't the case. Sharding would also affect the process of\ncontributing changes to GitLab as every contributor would now have to make sure\na shard key is present in their queries.\n\nFinally, there is the infrastructure that's necessary to make all of this work.\nServers have to be set up, monitoring has to be added, engineers have to be\ntrained so they are familiar with this new setup, the list goes on. While hosted\nsolutions may remove the need for managing your own servers it doesn't solve all\nproblems. Engineers still have to be trained and (most likely very expensive)\nbills have to be paid. At GitLab we also highly prefer to ship the tools we need\nso the community can make use of them. This means that if we were going to shard\nthe database we'd have to ship it (or at least parts of it) in our Omnibus\npackages. The only way you can make sure something you ship works is by running\nit yourself, meaning we wouldn't be able to use a hosted solution.\n\nUltimately we decided against sharding the database because we felt it was an\nexpensive, time-consuming, and complex solution to a problem we do not have.\n\n## Connection pooling for GitLab\n\nFor connection pooling we had two main requirements:\n\n1. It has to work well (obviously).\n2. It has to be easy to ship in our Omnibus packages so our users can also take\n   advantage of the connection pooler.\n\nReviewing the two solutions (pgpool and pgbouncer) was done in two steps:\n\n1. Perform various technical tests (does it work, how easy is it to configure,\n   etc).\n2. Find out what the experiences are of other users of the solution, what\n   problems they ran into and how they dealt with them, etc.\n\npgpool was the first solution we looked into, mostly because it seemed quite\nattractive based on all the features it offered. Some of the data from our tests\ncan be found in [this][pgpool-comment-data] comment.\n\nUltimately we decided against using pgpool based on a number of factors. For\nexample, pgpool does not support sticky connections. This is problematic when\nperforming a write and (trying to) display the results right away. 
Imagine\ncreating an issue and being redirected to the page, only to run into an HTTP 404\nerror because the server used for any read-only queries did not yet have the\ndata. One way to work around this would be to use synchronous replication, but\nthis brings many other problems to the table; problems we prefer to avoid.\n\nAnother problem is that pgpool's load balancing logic is decoupled from your\napplication and operates by parsing SQL queries and sending them to the right\nserver. Because this happens outside of your application you have very little\ncontrol over which query runs where. This may actually be beneficial to some\nbecause you don't need additional application logic, but it also prevents you\nfrom adjusting the routing logic if necessary.\n\nConfiguring pgpool also proved quite difficult due to the sheer number of\nconfiguration options. Perhaps the final nail in the coffin was the feedback we\ngot on pgpool from those who had used it in the past. The feedback we received\nregarding pgpool was usually negative, though not very detailed in most cases.\nWhile most of the complaints appeared to be related to earlier versions of\npgpool, it still made us doubt whether using it was the right choice.\n\nThis feedback, combined with the issues described above, ultimately led to us\ndeciding against using pgpool and using pgbouncer instead. We performed a\nsimilar set of tests with pgbouncer and were very satisfied with it. It's fairly\neasy to configure (and doesn't have that much that needs configuring in the\nfirst place), relatively easy to ship, focuses only on connection pooling (and\ndoes it really well), and has very little (if any) noticeable overhead. Perhaps\nmy only complaint would be that the pgbouncer website can be a little bit hard\nto navigate.\n\nUsing pgbouncer we were able to drop the number of active PostgreSQL connections\nfrom a few hundred to only 10-20 by using transaction pooling. We opted for\ntransaction pooling since Rails database connections are persistent. In\nsuch a setup, using session pooling would prevent us from being able to reduce\nthe number of PostgreSQL connections, thus bringing few (if any) benefits. By\nusing transaction pooling we were able to drop PostgreSQL's `max_connections`\nsetting from 3000 (the reason for this particular value was never really clear)\nto 300. pgbouncer is configured in such a way that even at peak capacity we will\nonly need 200 connections, giving us some room for additional connections such\nas `psql` consoles and maintenance tasks.\n\nA side effect of using transaction pooling is that you cannot use prepared\nstatements, as the `PREPARE` and `EXECUTE` commands may end up running in\ndifferent connections, producing errors as a result. Fortunately we did not\nmeasure any increase in response timings when disabling prepared statements, but\nwe _did_ measure a reduction of roughly 20 GB in memory usage on our database\nservers.\n\nTo ensure both web requests and background jobs have connections available we\nset up two separate pools: one pool of 150 connections for background\nprocessing, and a pool of 50 connections for web requests. For web requests we\nrarely need more than 20 connections, but for background processing we can\neasily spike to 100 connections simply due to the large number of background\nprocesses running on GitLab.com.\n\nToday we ship pgbouncer as part of GitLab EE's High Availability package. 
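If you run pgbouncer yourself and want to see how full the pools actually get, its built-in admin console is handy. A quick sketch (the host, port, and admin user below are assumptions about your setup):\n\n```\n# pgbouncer exposes a virtual \"pgbouncer\" database for administration;\n# SHOW POOLS lists per-pool client and server connection counts.\npsql -h 127.0.0.1 -p 6432 -U pgbouncer pgbouncer -c 'SHOW POOLS;'\n\n# SHOW STATS summarises per-database totals such as query counts and traffic.\npsql -h 127.0.0.1 -p 6432 -U pgbouncer pgbouncer -c 'SHOW STATS;'\n```\n\n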
For\nmore information you can refer to\n[\"Omnibus GitLab PostgreSQL High Availability.\"][ha-docs]\n\n## Database load balancing for GitLab\n\nWith pgpool and its load balancing feature out of the picture we needed\nsomething else to spread load across multiple hot-standby servers.\n\nFor (but not limited to) Rails applications there is a library called\n[Makara][makara] which implements load balancing logic and includes a default\nimplementation for ActiveRecord. Makara however has some problems that were a\ndeal-breaker for us. For example, its support for sticky connections is very\nlimited: when you perform a write the connection will stick to the primary using\na cookie, with a fixed TTL. This means that if replication lag is greater than\nthe TTL you may still end up running a query on a host that doesn't have the\ndata you need.\n\nMakara also requires you to configure quite a lot, such as all the database hosts\nand their roles, with no service discovery mechanism (our current solution does\nnot yet support this either, though it's planned for the near future). Makara\nalso [does not appear to be thread-safe][makara-thread-safe], which is\nproblematic since Sidekiq (the background processing system we use) is\nmulti-threaded. Finally, we wanted to have control over the load balancing logic\nas much as possible.\n\nBesides Makara there's also [Octopus][octopus] which has some load balancing\nmechanisms built in. Octopus however is geared towards database sharding and not\njust balancing of read-only queries. As a result we did not consider using\nOctopus.\n\nUltimately this led to us building our own solution directly into GitLab EE.\nThe merge request adding the initial implementation can be found [here][lb-mr],\nthough some changes, improvements, and fixes were applied later on.\n\nOur solution essentially works by replacing `ActiveRecord::Base.connection` with\na proxy object that handles routing of queries. This ensures we can load balance\nas many queries as possible, even queries that don't originate directly from our\nown code. This proxy object in turn determines what host a query is sent to\nbased on the methods called, removing the need for parsing SQL queries.\n\n### Sticky connections\n\nSticky connections are supported by storing a pointer to the current PostgreSQL\nWAL position the moment a write is performed. This pointer is then stored in\nRedis for a short duration at the end of a request. Each user is given their own\nkey so that the actions of one user won't lead to all other users being\naffected. In the next request we get the pointer and compare this with all the\nsecondaries. If all secondaries have a WAL pointer that exceeds our pointer we\nknow they are in sync and we can safely use a secondary for our read-only\nqueries. If one or more secondaries are not yet in sync we will continue using\nthe primary until they are in sync. 
If no write is performed for 30 seconds and\nall the secondaries are still not in sync we'll revert to using the secondaries\nin order to prevent somebody from ending up running queries on the primary\nforever.\n\nChecking if a secondary has caught up is quite simple and is implemented in\n`Gitlab::Database::LoadBalancing::Host#caught_up?` as follows:\n\n```ruby\ndef caught_up?(location)\n  string = connection.quote(location)\n\n  query = \"SELECT NOT pg_is_in_recovery() OR \" \\\n    \"pg_xlog_location_diff(pg_last_xlog_replay_location(), #{string}) >= 0 AS result\"\n\n  row = connection.select_all(query).first\n\n  row && row['result'] == 't'\nensure\n  release_connection\nend\n```\n\nMost of the code here is standard Rails code to run raw queries and grab the\nresults. The most interesting part is the query itself, which is as follows:\n\n```sql\nSELECT NOT pg_is_in_recovery()\nOR pg_xlog_location_diff(pg_last_xlog_replay_location(), WAL-POINTER) >= 0 AS result\n```\n\nHere `WAL-POINTER` is the WAL pointer as returned by the PostgreSQL function\n`pg_current_xlog_insert_location()`, which is executed on the primary. In the\nabove code snippet the pointer is passed as an argument, which is then\nquoted/escaped and passed to the query.\n\nUsing the function `pg_last_xlog_replay_location()` we can get the WAL pointer\nof a secondary, which we can then compare to our primary pointer using\n`pg_xlog_location_diff()`. If the result is 0 or greater we know the secondary\nis in sync.\n\nThe check `NOT pg_is_in_recovery()` is added to ensure the query won't fail when\na secondary that we're checking was _just_ promoted to a primary and our\nGitLab process is not yet aware of this. In such a case we simply return `true`\nsince the primary is always in sync with itself.\n\n### Background processing\n\nOur background processing code _always_ uses the primary since most of the work\nperformed in the background consists of writes. Furthermore, we can't reliably\nuse a hot-standby: many jobs are not directly tied to a user, so we have no way\nof knowing whether a job should use the primary or not.\n\n### Connection errors\n\nTo deal with connection errors our load balancer will not use a secondary if it\nis deemed to be offline, and connection errors on any host (including the\nprimary) will result in the load balancer retrying the operation a few times.\nThis ensures that we don't immediately display an error page in the event of a\nhiccup or a database failover. While we also deal with [hot standby\nconflicts][hot-standby-conflicts] on the load balancer level we ended up\nenabling `hot_standby_feedback` on our secondaries, as doing so solved all\nhot-standby conflicts without having any negative impact on table bloat.\n\nThe procedure we use is quite simple: for a secondary we'll retry a few times\nwith no delay in between. For a primary we'll retry the operation a few times\nusing an exponential backoff.\n\nFor more information you can refer to the source code in GitLab EE:\n\n* \u003Chttps://gitlab.com/gitlab-org/gitlab-ee/tree/master/ee/lib/gitlab/database/load_balancing.rb>\n* \u003Chttps://gitlab.com/gitlab-org/gitlab-ee/tree/master/ee/lib/gitlab/database/load_balancing>\n\nDatabase load balancing was first introduced in GitLab 9.0 and _only_ supports\nPostgreSQL. 
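As an aside, you can run the same catch-up check by hand with `psql` when debugging replication lag. A sketch using the same 9.6-era functions (the hostnames, database name, and WAL location are placeholders):\n\n```\n# On the primary: where is the WAL insert position right now?\npsql -h primary.example.com gitlabhq_production \\\n  -c 'SELECT pg_current_xlog_insert_location();'\n\n# On a secondary: has it replayed past that location yet? Replace\n# '0/3000060' with the value returned by the primary above.\npsql -h secondary.example.com gitlabhq_production \\\n  -c \"SELECT pg_xlog_location_diff(pg_last_xlog_replay_location(), '0/3000060') >= 0 AS caught_up;\"\n```\n\n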
More information can be found in the [9.0 release post][9-0-release]\nand the [documentation](https://docs.gitlab.com/ee/administration/postgresql/database_load_balancing.html).\n\n## Crunchy Data\n\nIn parallel to working on implementing connection pooling and load balancing we\nwere working with [Crunchy Data][crunchy]. Until very recently I was the only\n[database specialist][database-specialist] which meant I had a lot of work on my\nplate. Furthermore my knowledge of PostgreSQL internals and its wide range of\nsettings is limited (or at least was at the time), meaning there's only so much\nI could do. Because of this we hired Crunchy to help us out with identifying\nproblems, investigating slow queries, proposing schema optimisations, optimising\nPostgreSQL settings, and much more.\n\nFor the duration of this cooperation most work was performed in confidential\nissues so we could share private data such as log files. With the cooperation\ncoming to an end we have removed sensitive information from some of these issues\nand opened them up to the public. The primary issue was\n[gitlab-com/infrastructure#1448][issue-1448], which in turn led to many separate\nissues being created and resolved.\n\nThe benefit of this cooperation was immense as it helped us identify and solve\nmany problems, something that would have taken me months to identify and solve\nif I had to do this all by myself.\n\nFortunately we recently managed to hire our [second database specialist][gstark]\nand we hope to grow the team more in the coming months.\n\n## Combining connection pooling and database load balancing\n\nCombining connection pooling and database load balancing allowed us to\ndrastically reduce the number of resources necessary to run our database cluster\nas well as spread load across our hot-standby servers. For example, instead of\nour primary having a near constant CPU utilisation of 70 percent today it\nusually hovers between 10 percent and 20 percent, while our two hot-standby\nservers hover around 20 percent most of the time:\n\n![CPU Percentage](https://about.gitlab.com/images/scaling-the-gitlab-database/cpu-percentage.png)\n\nHere `db3.cluster.gitlab.com` is our primary while the other two hosts are our\nsecondaries.\n\nOther load-related factors such as load averages, disk usage, and memory usage\nwere also drastically improved. 
For example, instead of the primary having a\nload average of around 20 it barely goes above an average of 10:\n\n![CPU Percentage](https://about.gitlab.com/images/scaling-the-gitlab-database/load-averages.png)\n\nDuring the busiest hours our secondaries serve around 12 000 transactions per\nsecond (roughly 740 000 per minute), while the primary serves around 6 000\ntransactions per second (roughly 340 000 per minute):\n\n![Transactions Per Second](https://about.gitlab.com/images/scaling-the-gitlab-database/transactions.png)\n\nUnfortunately we don't have any data on the transaction rates prior to deploying\npgbouncer and our database load balancer.\n\nAn up-to-date overview of our PostgreSQL statistics can be found at our [public\nGrafana dashboard][postgres-stats].\n\nSome of the settings we have set for pgbouncer are as follows:\n\n| Setting              | Value       |\n|----------------------|-------------|\n| default_pool_size    | 100         |\n| reserve_pool_size    | 5           |\n| reserve_pool_timeout | 3           |\n| max_client_conn      | 2048        |\n| pool_mode            | transaction |\n| server_idle_timeout  | 30          |\n\nWith that all said there is still some work left to be done such as:\nimplementing service discovery ([#2042][issue-2042]), improving how we check if\na secondary is available ([#2866][issue-2866]), and ignoring secondaries that\nare too far behind the primary ([#2197][issue-2197]).\n\nIt's worth mentioning that we currently do not have any plans of turning our\nload balancing solution into a standalone library that you can use outside of\nGitLab, instead our focus is on providing a solid load balancing solution for\nGitLab EE.\n\nIf this has gotten you interested and you enjoy working with databases,\nimproving application performance, and adding database-related features to\nGitLab (such as [service discovery][issue-2042]) you should definitely check out\nthe [job opening][job-opening] and the [database specialist handbook\nentry][database-specialist] for more information.\n\n[max-connections]: https://www.postgresql.org/docs/9.6/static/runtime-config-connection.html#GUC-MAX-CONNECTIONS\n[pgbouncer]: https://pgbouncer.github.io/\n[pgpool]: http://pgpool.net/mediawiki/index.php/Main_Page\n[hot-standby]: https://www.postgresql.org/docs/9.6/static/hot-standby.html\n[pgpool-comment-data]: https://gitlab.com/gitlab-com/infrastructure/issues/259#note_23464570\n[ha-docs]: https://docs.gitlab.com/ee/administration/postgresql/index.html\n[makara]: https://github.com/taskrabbit/makara\n[makara-thread-safe]: https://github.com/taskrabbit/makara/issues/151\n[lb-mr]: https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/1283\n[issue-2042]: https://gitlab.com/gitlab-org/gitlab-ee/issues/2042\n[issue-2866]: https://gitlab.com/gitlab-org/gitlab-ee/issues/2866\n[issue-2197]: https://gitlab.com/gitlab-org/gitlab-ee/issues/2197\n[9-0-release]: /releases/2017/03/22/gitlab-9-0-released/\n[lb-docs]: https://docs.gitlab.com/ee/administration/database_load_balancing.html\n[postgres-stats]: https://dashboards.gitlab.com/dashboard/db/postgresql-overview?refresh=5m&orgId=1\n[hot-standby-conflicts]: https://www.postgresql.org/docs/current/static/hot-standby.html#HOT-STANDBY-CONFLICT\n[citus]: https://www.citusdata.com/\n[octopus]: https://github.com/thiagopradi/octopus\n[crunchy]: https://www.crunchydata.com/\n[database-specialist]: /handbook/engineering/infrastructure/database/\n[job-opening]: /job-families/engineering/database-engineer/\n[issue-1448]: 
https://gitlab.com/gitlab-com/infrastructure/issues/1448\n[gstark]: https://gitlab.com/_stark\n",[915,1286],{"slug":7982,"featured":6,"template":678},"scaling-the-gitlab-database","content:en-us:blog:scaling-the-gitlab-database.yml","Scaling The Gitlab Database","en-us/blog/scaling-the-gitlab-database.yml","en-us/blog/scaling-the-gitlab-database",{"_path":7988,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":7989,"content":7995,"config":8002,"_id":8004,"_type":16,"title":8005,"_source":17,"_file":8006,"_stem":8007,"_extension":20},"/en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab",{"title":7990,"description":7991,"ogTitle":7990,"ogDescription":7991,"noIndex":6,"ogImage":7992,"ogUrl":7993,"ogSiteName":692,"ogType":693,"canonicalUrls":7993,"schema":7994},"CI/CD pipeline: GitLab & Helm for Kubernetes Auto Deploy","One user walks through how he tried GitLab caching and split the job into multiple steps to get better feedback.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sergey Nuzhdin\"}],\n        \"datePublished\": \"2017-09-21\",\n      }",{"title":7996,"description":7991,"authors":7997,"heroImage":7992,"date":7999,"body":8000,"category":14,"tags":8001},"How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm",[7998],"Sergey Nuzhdin","2017-09-21","Recently, I started working on a few Golang [microservices](/topics/microservices/). I decided to try GitLab’s caching and split the job into multiple steps for better feedback in the UI.\n\n\u003C!-- more -->\n\nSince my previous posts[[1](http://blog.lwolf.org/post/how-to-build-tiny-golang-docker-images-with-gitlab-ci/)][[2](http://blog.lwolf.org/post/continuous-deployment-to-kubernetes-from-gitlab-ci/)] about [CI/CD](/topics/ci-cd/), a lot has changed. I started using Helm charts for packaging applications, and stopped using docker-in-docker in gitlab-runner.\n\nHere are a few of the main changes to my `.gitlab-ci.yml` file since my previous post:\n\n* no docker-in-docker\n* using cache for packages instead of a prebuilt image with dependencies\n* splitting everything into multiple steps\n* autodeploy to staging environment using Helm, a package manager for Kubernetes\n\n### Building Golang image\n\nSince Golang is very strict about the location of the project, we need to make some adjustments to the CI job. This is done in the `before_script` block. Simply create needed directories and link source code in there. Assuming that the official repository of the project is `gitlab.example.com/librerio/libr_files` it should look like this.\n\n```\nvariables:\n  APP_PATH: /go/src/gitlab.example.com/librerio/libr_files\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.example.com/librerio/\n  - ln -s $PWD ${APP_PATH}\n  - mkdir -p ${APP_PATH}/vendor\n  - cd ${APP_PATH}\n```\n\nWith this in place, we can install dependencies and build our binaries. To avoid the download of all packages on each build we need to configure caching. Due to the strange caching rules of GitLab, we need to add vendor directory to both cache and artifacts. 
The cache will give us the ability to share the vendor directory between builds, and artifacts will allow us to pass it between jobs of the same pipeline.\n\n```\ncache:\n  untracked: true\n  key: \"$CI_BUILD_REF_NAME\"\n  paths:\n    - vendor/\n\nsetup:\n  stage: setup\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - glide install -v\n  artifacts:\n    paths:\n     - vendor/\n```\n\nThe build step didn’t change; it’s still about building the binary. I add the binary to artifacts.\n\n```\nbuild:\n  stage: build\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - cd ${APP_PATH}\n    - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o release/app -ldflags '-w -s'\n    - cd release\n  artifacts:\n    paths:\n     - release/\n```\n\n### Test stage\n\nTo run golang tests with coverage reports I’m using a variation of [this shell script](https://github.com/mlafeldt/chef-runner/blob/v0.7.0/script/coverage). It runs all tests in project subdirectories and creates a [coverage report](/blog/publish-code-coverage-report-with-gitlab-pages/). I changed it a bit before putting it into a gist. I exclude the vendor directory from tests.\n\n* coverage regexp for gitlab-ci: `^total:\\s*\\(statements\\)\\s*(\\d+.\\d+\\%)`\n\n### Deploy stage\n\nI don’t use GitLab’s native integration with Kubernetes.\n\nFirst I thought about creating Kubernetes secrets and mounting them into the gitlab-runner pod. But it’s very complicated: you need to upgrade the deployment every time you want to add a new Kubernetes cluster configuration. So I’m using GitLab’s CI/CD variables with a base64-encoded Kubernetes config. Each project can have any number of configurations. The process is easy: create a base64 string from the configuration file and copy it to the clipboard. After this, put it into a `kube_config` variable (name it whatever you like).\n\n`cat ~/.kube/config | base64 | pbcopy`\n\nIf you do not own a full GitLab installation, consider creating a Kubernetes user with restricted permissions.\n\nThen in the deploy stage, we can decode this variable back into a file and use it with kubectl.\n\n```\nvariables:\n  KUBECONFIG: /etc/deploy/config\n\ndeploy:\n  ...\n  before_script:\n    - mkdir -p /etc/deploy\n    - echo ${kube_config} | base64 -d > ${KUBECONFIG}\n    - kubectl config use-context homekube\n    - helm init --client-only\n    - helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n    - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/\n    - helm repo update\n```\n\nThe deploy stage also covers the case where you have several versions of the same application.\n\nFor example, you have two versions of the API: v1.0 and v1.1. All you need to do is set `appVersion` in the Chart.yaml file. The build system will check the API version and either deploy or upgrade the needed release.\n\n```\n- export API_VERSION=\"$(grep \"appVersion\" Chart.yaml | cut -d\" \" -f2)\"\n- export RELEASE_NAME=\"libr-files-v${API_VERSION/./-}\"\n- export DEPLOYS=$(helm ls | grep $RELEASE_NAME | wc -l)\n- if [ ${DEPLOYS}  -eq 0 ]; then helm install --name=${RELEASE_NAME} . --namespace=${STAGING_NAMESPACE}; else helm upgrade ${RELEASE_NAME} . 
--namespace=${STAGING_NAMESPACE}; fi\n```\n\n### tl;dr\n\nHere is the complete `.gitlab-ci.yml` file for reference.\n\n```\ncache:\n  untracked: true\n  key: \"$CI_BUILD_REF_NAME\"\n  paths:\n    - vendor/\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.example.com/librerio/\n  - ln -s $PWD ${APP_PATH}\n  - mkdir -p ${APP_PATH}/vendor\n  - cd ${APP_PATH}\n\nstages:\n  - setup\n  - test\n  - build\n  - release\n  - deploy\n\nvariables:\n  CONTAINER_IMAGE: ${CI_REGISTRY}/${CI_PROJECT_PATH}:${CI_BUILD_REF_NAME}_${CI_BUILD_REF}\n  CONTAINER_IMAGE_LATEST: ${CI_REGISTRY}/${CI_PROJECT_PATH}:latest\n  DOCKER_DRIVER: overlay2\n\n  KUBECONFIG: /etc/deploy/config\n  STAGING_NAMESPACE: app-stage\n  PRODUCTION_NAMESPACE: app-prod\n\n  APP_PATH: /go/src/gitlab.example.com/librerio/libr_files\n  POSTGRES_USER: gorma\n  POSTGRES_DB: test-${CI_BUILD_REF}\n  POSTGRES_PASSWORD: gorma\n\nsetup:\n  stage: setup\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - glide install -v\n  artifacts:\n    paths:\n     - vendor/\n\nbuild:\n  stage: build\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - cd ${APP_PATH}\n    - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o release/app -ldflags '-w -s'\n    - cd release\n  artifacts:\n    paths:\n     - release/\n\nrelease:\n  stage: release\n  image: docker:latest\n  script:\n    - cd ${APP_PATH}/release\n    - docker login -u gitlab-ci-token -p ${CI_BUILD_TOKEN} ${CI_REGISTRY}\n    - docker build -t ${CONTAINER_IMAGE} .\n    - docker tag ${CONTAINER_IMAGE} ${CONTAINER_IMAGE_LATEST}\n    - docker push ${CONTAINER_IMAGE}\n    - docker push ${CONTAINER_IMAGE_LATEST}\n\ntest:\n  stage: test\n  image: lwolf/golang-glide:0.12.3\n  services:\n    - postgres:9.6\n  script:\n    - cd ${APP_PATH}\n    - curl -o coverage.sh https://gist.githubusercontent.com/lwolf/3764a3b6cd08387e80aa6ca3b9534b8a/raw\n    - sh coverage.sh\n\ndeploy_staging:\n  stage: deploy\n  image: lwolf/helm-kubectl-docker:v152_213\n  before_script:\n    - mkdir -p /etc/deploy\n    - echo ${kube_config} | base64 -d > ${KUBECONFIG}\n    - kubectl config use-context homekube\n    - helm init --client-only\n    - helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n    - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/\n    - helm repo update\n  script:\n    - cd deploy/libr-files\n    - helm dep build\n    - export API_VERSION=\"$(grep \"appVersion\" Chart.yaml | cut -d\" \" -f2)\"\n    - export RELEASE_NAME=\"libr-files-v${API_VERSION/./-}\"\n    - export DEPLOYS=$(helm ls | grep $RELEASE_NAME | wc -l)\n    - if [ ${DEPLOYS}  -eq 0 ]; then helm install --name=${RELEASE_NAME} . --namespace=${STAGING_NAMESPACE}; else helm upgrade ${RELEASE_NAME} . 
--namespace=${STAGING_NAMESPACE}; fi\n  environment:\n    name: staging\n    url: https://librerio.example.com\n  only:\n  - master\n\n```\n\n_[How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm](http://blog.lwolf.org/post/how-to-create-ci-cd-pipeline-with-autodeploy-k8s-gitlab-helm/) was originally published on Lwolfs Blog._\n\nPhoto by C Chapman on [Unsplash](https://unsplash.com/)",[832,937,726,4440],{"slug":8003,"featured":6,"template":678},"how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab","content:en-us:blog:how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab.yml","How To Create A Ci Cd Pipeline With Auto Deploy To Kubernetes Using Gitlab","en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab.yml","en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab",{"_path":8009,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8010,"content":8016,"config":8022,"_id":8024,"_type":16,"title":8025,"_source":17,"_file":8026,"_stem":8027,"_extension":20},"/en-us/blog/vuejs-app-gitlab",{"title":8011,"description":8012,"ogTitle":8011,"ogDescription":8012,"noIndex":6,"ogImage":8013,"ogUrl":8014,"ogSiteName":692,"ogType":693,"canonicalUrls":8014,"schema":8015},"How to use GitLab CI/CD for Vue.js","Learn how to get the most out of GitLab CI/CD with this guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680363/Blog/Hero%20Images/build-test-deploy-vue.jpg","https://about.gitlab.com/blog/vuejs-app-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab CI/CD for Vue.js\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Simon Tarchichi\"}],\n        \"datePublished\": \"2017-09-12\",\n      }",{"title":8011,"description":8012,"authors":8017,"heroImage":8013,"date":8019,"body":8020,"category":14,"tags":8021},[8018],"Simon Tarchichi","2017-09-12","\n\nContinuous Integration allows you to:\n\n- Deploy your app instantly, when new code is pushed into a repo\n- Build your app (in our case `npm run build`)\n- Trigger test scripts (and block deployment if a test fails)\n\nIt is definitely worth the effort if you update your app regularly.\n\nGitLab is a service that started as an open-source GitHub competitor, mostly to host code in Git repositories, and evolved into an amazing tool that I won’t introduce here, as it isn’t related to Vue.js. One thing though, they were one of the first major companies to use Vue.js for their user interface.\n\nDocker has to be mentioned as well. It is the most popular containerization service. It basically means you get to execute code in a secure environment, configured exactly like your dev/prod. Very useful when you need to make sure your code is executed with all its dependencies.\n\nEach of these tools would require many posts to be covered. We’ll focus on setting up [CI/CD](/topics/ci-cd/) for your Vue.js project. We’ll assume you have no knowledge in the matter.\n\n[GitLab CI/CD is free for personal projects](/pricing/#gitlab-com), I don’t know any other tool with such a beautiful UI that does that. If you do, please let me know.\n\n### The .gitlab-ci.yml file\n\nCreate a `.gitlab-ci.yml` file at the root of your repo. GitLab will check for this file when new code is pushed. 
If the file is present, it will define a [pipeline](https://docs.gitlab.com/ee/ci/pipelines/index.html), executed by a [GitLab Runner](http://docs.gitlab.com/runner/). Click the links if you are curious, or keep reading to see a working example.\n\nDefault stages of a pipeline are:\n\n1. build\n1. test\n1. deploy\n\nAgain, you don’t need to master this, but this is the most common use case. You may not have set up unit tests, and if you haven’t, you may remove this step from the file; GitLab won’t mind.\n\nHere is our file; you may copy/paste it into your repo:\n\n```\nbuild site:\n  image: node:6\n  stage: build\n  script:\n    - npm install --progress=false\n    - npm run build\n  artifacts:\n    expire_in: 1 week\n    paths:\n      - dist\n\nunit test:\n  image: node:6\n  stage: test\n  script:\n    - npm install --progress=false\n    - npm run unit\n\ndeploy:\n  image: alpine\n  stage: deploy\n  script:\n    - apk add --no-cache rsync openssh\n    - mkdir -p ~/.ssh\n    - echo \"$SSH_PRIVATE_KEY\" >> ~/.ssh/id_dsa\n    - chmod 600 ~/.ssh/id_dsa\n    - echo -e \"Host *\\n\\tStrictHostKeyChecking no\\n\\n\" > ~/.ssh/config\n    - rsync -rav --delete dist/ user@server.com:/your/project/path/\n```\n\n### Test our file\n\nNow commit and push the `.gitlab-ci.yml` file to your GitLab repo.\n\nHere is how it will look in the Pipelines tab of the GitLab UI:\n\n![GitLab CI/CD Pipelines](https://about.gitlab.com/images/blogimages/gitlab-ci-pipelines.png){: .shadow}\u003Cbr>\n\nThe green checkmark indicates that the step has succeeded, and you can see the logs by clicking it.\n\nIn the second example the tests have failed; click the red mark to read the logs and understand what went wrong.\n\n![GitLab CI/CD logs](https://about.gitlab.com/images/blogimages/gitlab-ci-failed.png){: .shadow}\u003Cbr>\n\n### File anatomy\n\n- `image` is the link to the Docker image. I have chosen to use public official images, but you may use one from the Docker Hub or a private registry.\n\n- `stage` should be `build`, `test` or `deploy` if you use defaults. But that [can be customized](https://docs.gitlab.com/ee/ci/yaml/stages).\n\n- `script` lists the command lines executed inside our build environment.\n\n- `artifacts` describes a path to the build result. The files in this path can be used in the next build steps (in `deploy` in our example). You can download artifacts from the GitLab UI.\n\nMore about the `.gitlab-ci.yml` file options [in the docs](https://docs.gitlab.com/ee/ci/yaml/).\n\n### About the deployment script\n\nI have described my use case here, but it may not be the simplest. Relevant examples for [deployment to Amazon S3](/blog/ci-deployment-and-environments/) or other services can be found online.\n\nTo get it working, you’ll need to **provide GitLab with a private SSH key**. If you are no security expert, then it is time to take advice from one. The bottom line is **do not give it your private SSH key**; create one that is used only by GitLab.\n\n```\n# create gitlab user\nadduser gitlab\n\n# generate a DSA SSH key\nsu -l gitlab\nssh-keygen -t dsa\n\n# authorize the key to log in using the public key and output the private one\ncd .ssh\nmv id_dsa.pub authorized_keys\ncat id_dsa && rm id_dsa\n```\n\nThen go to GitLab UI “Settings” (the gear icon), then “Variables” and copy/paste the content of your terminal in “Value”. The “Key” should be `SSH_PRIVATE_KEY`. 
This private key will be used to do the `rsync`.\n\n![GitLab CI/CD variables](https://about.gitlab.com/images/blogimages/gitlab-ci-variables.png){: .shadow}\u003Cbr>\n\n## Links\n\n- [Sample GitLab repository](https://gitlab.com/kartsims/vue-ci)\n- [Gitlab CI/CD docs](https://docs.gitlab.com/ee/ci/)\n\nIf you need more information, leave a comment and I’ll be happy to help if I can.\n\n\"[Golden Gate Bridge Vista Point](https://unsplash.com/@tigesphotos?photo=-BiEu8VP9-M)\" by [Tiger Robinson](https://unsplash.com/@tigesphotos) on Unsplash\n{: .note}\n",[110,4440],{"slug":8023,"featured":6,"template":678},"vuejs-app-gitlab","content:en-us:blog:vuejs-app-gitlab.yml","Vuejs App Gitlab","en-us/blog/vuejs-app-gitlab.yml","en-us/blog/vuejs-app-gitlab",{"_path":8029,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8030,"content":8036,"config":8041,"_id":8043,"_type":16,"title":8044,"_source":17,"_file":8045,"_stem":8046,"_extension":20},"/en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci",{"title":8031,"description":8032,"ogTitle":8031,"ogDescription":8032,"noIndex":6,"ogImage":8033,"ogUrl":8034,"ogSiteName":692,"ogType":693,"canonicalUrls":8034,"schema":8035},"How to automatically create a new MR on GitLab with GitLab CI","With this script, every time we push a commit, GitLab CI checks if the branch that commit belongs to already has an open MR and, if not, creates one.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679783/Blog/Hero%20Images/whats-next-for-gitlab-ci.jpg","https://about.gitlab.com/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automatically create a new MR on GitLab with GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Riccardo Padovani\"}],\n        \"datePublished\": \"2017-09-05\",\n      }",{"title":8031,"description":8032,"authors":8037,"heroImage":8033,"date":8038,"body":8039,"category":14,"tags":8040},[7708],"2017-09-05","\n\nAt [fleetster](https://www.fleetster.net/), we have our own instance of [GitLab](https://gitlab.com/) and we rely a lot on [GitLab CI](/solutions/continuous-integration/). How could it be otherwise? We are a small team, with a lot of different projects (in the last month alone, we had more than **13,000 commits** over **25 different projects**, and we are only 10 people, with myself working part time). Automating as many development steps as possible (from build to QA to deploy) is helping us a lot, but sometimes we write some code and then forget about it. This is a disaster! We have some bug fix or some new feature ready, but it is forgotten in some branch somewhere.\n\n\u003C!-- more -->\n\nThis is why we have a policy to push as soon as possible to open a new MR, mark it as WIP, and assign it to ourselves; in this way GitLab will remind us we have an MR.\n\nYou need to do three steps to achieve that:\n\n* Push the code\n* Click on the link that appears on your terminal\n* Fill in a form\n\nBut we are nerds. We are lazy. 
So one night, after a couple of beers, [Alberto Urbano](https://www.linkedin.com/in/alberto-urbano-047a4b19/) and I spent a few hours automating a task that takes 10 seconds.\n\nActually, the experience was quite fun: it was the first time we used the GitLab APIs, and we learned things we will apply to other scripts as well.\n\n![Image via Riccardo's blog](https://about.gitlab.com/images/blogimages/automating-tasks-expectation-versus-reality.png){: .shadow}\u003Cbr>\n*Image by Randall Munroe, [xkcd.com](https://imgs.xkcd.com/comics/automation.png)*\n\n### The script\n\nWith this script, every time we push a commit, GitLab CI checks if the branch that commit belongs to already has an open MR and, if not, creates one. It then assigns the MR to you and puts **WIP** in the title to mark it as a work in progress.\n\nIn this way you cannot forget about that branch, and when you’ve finished writing code on it, you just need to remove the WIP from the title and assign it to the right person for review.\n\nIn the end, this is the script we came up with (when you add it to your project, remember to make it executable):\n\n```\n#!/usr/bin/env bash\n# Extract the host where the server is running, and add the URL to the APIs\n[[ $HOST =~ ^https?://[^/]+ ]] && HOST=\"${BASH_REMATCH[0]}/api/v4/projects/\"\n\n# Look up the default branch\nTARGET_BRANCH=`curl --silent \"${HOST}${CI_PROJECT_ID}\" --header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\" | python3 -c \"import sys, json; print(json.load(sys.stdin)['default_branch'])\"`;\n\n# The body of our new MR; we want the source branch removed after the MR has\n# been merged\nBODY=\"{\n    \\\"id\\\": ${CI_PROJECT_ID},\n    \\\"source_branch\\\": \\\"${CI_COMMIT_REF_NAME}\\\",\n    \\\"target_branch\\\": \\\"${TARGET_BRANCH}\\\",\n    \\\"remove_source_branch\\\": true,\n    \\\"title\\\": \\\"WIP: ${CI_COMMIT_REF_NAME}\\\",\n    \\\"assignee_id\\\":\\\"${GITLAB_USER_ID}\\\"\n}\";\n\n# Request a list of all the open merge requests and check whether one already\n# exists with the same source branch\nLISTMR=`curl --silent \"${HOST}${CI_PROJECT_ID}/merge_requests?state=opened\" --header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\"`;\nCOUNTBRANCHES=`echo ${LISTMR} | grep -o \"\\\"source_branch\\\":\\\"${CI_COMMIT_REF_NAME}\\\"\" | wc -l`;\n\n# No MR found, let's create a new one\nif [ ${COUNTBRANCHES} -eq \"0\" ]; then\n    curl -X POST \"${HOST}${CI_PROJECT_ID}/merge_requests\" \\\n        --header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\" \\\n        --header \"Content-Type: application/json\" \\\n        --data \"${BODY}\";\n\n    echo \"Opened a new merge request: WIP: ${CI_COMMIT_REF_NAME} and assigned to you\";\n    exit;\nfi\n\necho \"No new merge request opened\";\n```\n\n### GitLab CI\n\nThe variables used in the script are passed to it by our `.gitlab-ci.yml` file:\n\n```\nstages:\n    - openMr\n    - otherStages\n\nopenMr:\n    before_script: []   # We do not need any setup work, let's remove the global one (if any)\n    stage: openMr\n    only:\n      - /^feature\\/*/   # We have a very strict naming convention\n    script:\n        - HOST=${CI_PROJECT_URL} CI_PROJECT_ID=${CI_PROJECT_ID} CI_COMMIT_REF_NAME=${CI_COMMIT_REF_NAME} GITLAB_USER_ID=${GITLAB_USER_ID} PRIVATE_TOKEN=${PRIVATE_TOKEN} ./utils/autoMergeRequest.sh # The name of the script\n```\n\nAll these environment variables are set by GitLab itself, except `PRIVATE_TOKEN`. A master of the project has to create it in their own profile and add it to the project settings. To create the personal access token, go to `/profile/personal_access_tokens` on your GitLab instance, then add it to your project’s CI secret variables so the pipeline can pass it to the script.
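\n\nIf you want to debug the script outside of CI, you can export the same variables by hand first. A minimal sketch, where every value is a hypothetical stand-in for what the pipeline would provide:\n\n```\n# stand-in values for the variables GitLab CI normally sets\nexport HOST=\"https://gitlab.example.com/myteam/myproject\"\nexport CI_PROJECT_ID=\"42\"\nexport CI_COMMIT_REF_NAME=\"feature/my-branch\"\nexport GITLAB_USER_ID=\"7\"\nexport PRIVATE_TOKEN=\"your-personal-access-token\"\n\n./utils/autoMergeRequest.sh\n```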
\n\n### Ways to improve\n\nThe script is far from perfect.\n\nFirst of all, it makes two API calls: one to fetch the list of MRs and one to fetch the default branch to use as the target. Of course, you could hardcode the value (after all, it shouldn’t change often), but hardcoding is always bad.\n\nAlso, it uses python3 to extract the name of the target branch – this is just one of many possible solutions; use whatever is available on your system. Apart from that, the script doesn’t have any external dependencies.\n\nThe other thing is how you need to set up the secret token to call the APIs. Luckily, GitLab’s developers are working on a [new way](https://gitlab.com/gitlab-org/gitlab-ce/issues/12729) to manage secret tokens.\n\n### Conclusion\n\nThis was a very small and simple example of how powerful Continuous Integration can be. It takes some time to set up everything, but in the long run it will save your team a lot of headaches.\n\nAt fleetster, we use it not only for running tests but also for automatic versioning of the software and automatic deploys to testing environments. We are working to automate other jobs as well (building apps and publishing them on the Play Store, and so on).\n\nSpeaking of which, **do you want to work in a young and dynamic office with me and a lot of other amazing people?** Take a look at the [open positions at fleetster](https://www.fleetster.net/fleetster-team.html)!\n\nKudos to the GitLab team (and everyone who helps in their free time) for their awesome work!\n\nIf you have any questions or feedback about this blog post, please drop me an email at riccardo@rpadovani.com :-)\n\nBye for now,\nA. & R.\n\nP.S.: If you found this article helpful and you’d like us to write more, would you mind helping us reach the Ballmer Peak by buying us a [beer](https://rpadovani.com/donations)?\n\nThis post originally appeared on [*rpadovani.com*](https://rpadovani.com/open-mr-gitlab-ci).\n\n## About the Guest Author\n\nRiccardo is a university student and a part-time developer at [fleetster](http://www.fleetster.net/).
When not busy with university or work, he likes to contribute to open-source projects.\n",[110,4440,726],{"slug":8042,"featured":6,"template":678},"how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci","content:en-us:blog:how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci.yml","How To Automatically Create A New Mr On Gitlab With Gitlab Ci","en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci.yml","en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci",{"_path":8048,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8049,"content":8055,"config":8061,"_id":8063,"_type":16,"title":8064,"_source":17,"_file":8065,"_stem":8066,"_extension":20},"/en-us/blog/migrating-your-jira-issues-into-gitlab",{"title":8050,"description":8051,"ogTitle":8050,"ogDescription":8051,"noIndex":6,"ogImage":8052,"ogUrl":8053,"ogSiteName":692,"ogType":693,"canonicalUrls":8053,"schema":8054},"Migrating your JIRA issues to GitLab","We're migrating all of our working tools to open-source ones, and moving to GitLab has made all the difference.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667509/Blog/Hero%20Images/continuous-integration-from-jenkins-to-gitlab-using-docker.jpg","https://about.gitlab.com/blog/migrating-your-jira-issues-into-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrating your JIRA issues to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abdulkader Benchi\"}],\n        \"datePublished\": \"2017-08-21\",\n      }",{"title":8050,"description":8051,"authors":8056,"heroImage":8052,"date":8058,"body":8059,"category":14,"tags":8060},[8057],"Abdulkader Benchi","2017-08-21","\n\nHere at [Linagora](https://linagora.com/), we believe in open source. If you have read my [last article](/blog/docker-my-precious/), you should know that we have recently migrated from [Atlassian](https://www.atlassian.com/) to [GitLab](https://gitlab.com/).\n\n\u003C!-- more -->\n\n_Editor's note: We don't currently have a native way to migrate JIRA issues into GitLab issues, although we are [working on one](https://gitlab.com/gitlab-org/gitlab-ee/issues/2780)! In the meantime, we are very appreciative of community efforts to provide workarounds like this one._\n\nMigrating our repositories from [Bitbucket](https://bitbucket.org/) to GitLab was easy thanks to Git. However, migrating our issues (aka tickets) from [JIRA](https://www.atlassian.com/software/jira) to GitLab was not so obvious. In fact, there are several alternative solutions for integrating JIRA as a plugin inside GitLab so as to continue using JIRA along with GitLab. However, our main goal was to completely leverage GitLab as our only open-source development tool.\n\nIf you want to know how to migrate your JIRA issues into GitLab, then you are reading the right article. Once you read it, you will discover that the migration from JIRA to GitLab is really easy. Yes, as you can see, winter is coming for GitLab's rivals, because everything is possible with GitLab.\n\n### Migrating JIRA issues into GitLab Issues\n\nOur migration process will leverage the [REST APIs](http://www.restapitutorial.com/) provided by both [JIRA](https://developer.atlassian.com/jiradev/jira-apis/jira-rest-apis) and [GitLab Issues](https://docs.gitlab.com/ee/api/issues.html).\n\n#### API calls:\n\nTo perform REST API calls, you can use your own preferred library.
For me, I will use [axios](https://github.com/mzabriskie/axios), my preferred promise-based HTTP client for the browser and Node.js. You can simply install it locally by doing:\n\n```\nnpm install axios\n```\n\n#### JIRA side:\n\nBefore requesting the endpoints provided by JIRA, we need to gather the following information:\n\n```\n// the base url to your JIRA\nconst JIRA_URL = 'https://your-jira-url.com/';\n\n// the JIRA project ID (short)\nconst JIRA_PROJECT = 'PRO';\n\n// JIRA username and password used to login\nconst JIRA_ACCOUNT = {\n  username,\n  password\n};\n```\n\nNow, we need to call two endpoints during the migration process. The first endpoint is to get all **JIRA issues**:\n\n```\naxios.request({\n  method: 'get',\n  url: `${JIRA_URL}/rest/api/2/search?jql=project=${JIRA_PROJECT}+order+by+id+asc&startAt=${offset}&maxResults=${limit}`,\n  auth: {\n    username: JIRA_ACCOUNT.username,\n    password: JIRA_ACCOUNT.password\n  }\n})\n```\n\nThe second endpoint is to get the **attachments** and the **comments** related to a given issue:\n\n```\naxios.request({\n  method: 'get',\n  /*\n  * JIRA_ISSUE = the JIRA issue that we get from the previous call\n  */\n  url: `${JIRA_URL}/rest/api/2/issue/${JIRA_ISSUE.id}/?fields=attachment,comment`,\n  auth: {\n    username: JIRA_ACCOUNT.username,\n    password: JIRA_ACCOUNT.password\n  }\n})\n```\n\n#### GitLab side:\n\nAs with JIRA, we need to gather some information before we start sending REST requests:\n\n```\n// the base url to your GitLab\nconst GITLAB_URL = 'http://your-gitlab-url.com/';\n\n// the project in gitlab that you are importing issues to\nconst GITLAB_PROJECT = 'namespaced/project/name';\n\n// GitLab username and password used to login\nconst GITLAB_ACCOUNT = {\n  username,\n  password\n};\n\n/* This token will be used whenever the API is invoked and\n* the JIRA author of the comment/attachment/issue is not a GitLab user;\n* in that case, this identity will be used instead.\n* GITLAB_TOKEN is visible in your account: https://ci.linagora.com/profile/account\n*/\nconst GITLAB_TOKEN = 'get-this-token-from-your-profile';\n```\n\nEach JIRA issue has several fields which represent JIRA users, e.g., *assignee* and *reporter*. When migrating to GitLab, we should try to link these users to GitLab users (if they already exist on GitLab). However, if the user is not a GitLab user, then we have to fall back on the **GITLAB_TOKEN** (defined in the snippet above). That is, if the user does not exist on GitLab, the identity of the user who is doing the migration will be used instead.\n\nTo search all GitLab users, we need to send the following REST call:\n\n```\naxios.request({\n  method: 'get',\n  // 10000 users, should be enough to get them all\n  url: `${GITLAB_URL}/api/v4/users?active=true&search=&per_page=10000`,\n  headers: {\n    'PRIVATE-TOKEN': GITLAB_TOKEN\n  }\n})\n```\n\nAnd now, we can find the corresponding GitLab user for each JIRA user by doing:\n\n```\n// _.find comes from lodash: const _ = require('lodash');\nfunction jiraToGitlabUser(JIRAUser) {\n    // GitLabUsers = the list of GitLab users we get from the last call\n    return JIRAUser ? _.find(GitLabUsers, { email: JIRAUser.emailAddress }) : null\n  }\n```\n\nIt is worth noting that JIRA and GitLab issues are different in nature, so you need to map one type of issue onto the other.
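\n\nBefore wiring the two sides together, it can help to smoke-test both APIs from the command line. In the sketch below, every URL, project, and credential is a placeholder matching the constants above:\n\n```\n# fetch the first page of issues for the JIRA project\ncurl -u \"username:password\" \\\n  \"https://your-jira-url.com/rest/api/2/search?jql=project=PRO&startAt=0&maxResults=50\"\n\n# create a throwaway issue in the target GitLab project\ncurl -X POST \\\n  --header \"PRIVATE-TOKEN: get-this-token-from-your-profile\" \\\n  --data \"title=Migration%20smoke%20test\" \\\n  \"http://your-gitlab-url.com/api/v4/projects/namespaced%2Fproject%2Fname/issues\"\n```\n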
After searching all [JIRA issues](https://medium.com/linagora-engineering/gitlab-rivals-winter-is-here-584eacf1fe9a) and [JIRA attachments](https://medium.com/linagora-engineering/gitlab-rivals-winter-is-here-584eacf1fe9a) and comments, we can now transfer them into GitLab issues by doing the following mapping:\n\n```\n{\n    title: JIRAIssue.fields.summary,\n    description: JIRAIssue.fields.description,\n    labels: [JIRAIssue.fields.issuetype.name],\n    created_at: JIRAIssue.fields.created,\n    updated_at: JIRAIssue.fields.updated,\n    done: JIRAIssue.fields.status.statusCategory.name === 'Done',\n    assignee: jiraToGitlabUser(JIRAIssue.fields.assignee),\n    reporter: jiraToGitlabUser(JIRAIssue.fields.reporter),\n    comments: JIRAComments.map(JIRAComment => ({\n      author: jiraToGitlabUser(JIRAComment.author),\n      comment: JIRAComment.body,\n      created_at: JIRAComment.created\n    })),\n    attachments: JIRAAttachments.map(JIRAAttachment => ({\n      author: jiraToGitlabUser(JIRAAttachment.author),\n      filename: JIRAAttachment.filename,\n      content: JIRAAttachment.content,\n      created_at: JIRAAttachment.created\n    }))\n};\n```\n\nNow that our GitLab issue object is ready, all we need to do is post it:\n\n```\naxios.request({\n  method: 'post',\n  url: `${GITLAB_URL}/api/v4/projects/${encodeURIComponent(GITLAB_PROJECT)}/issues`,\n  // the GitLab issue that we have just created\n  data: GITLAB_ISSUE,\n  headers: {\n    'PRIVATE-TOKEN': GITLAB_TOKEN\n  }\n})\n```\n\nAs you can see, migrating your JIRA tickets to GitLab is all about some REST API calls. As a developer, you probably make REST API calls like these every day. So we really do not need to be stuck with JIRA, nor to bolt it onto GitLab as a plugin.\n\nIf this article helped you discover something interesting that you would enjoy doing every day, please do not hesitate to join us. We are looking for new talent.
For more information, you can have a look at our [Job site](https://job.linagora.com/en/).\n\nThis post originally appeared on _[Medium](https://medium.com/linagora-engineering/gitlab-rivals-winter-is-here-584eacf1fe9a)_.\n\n### About the Guest Author\n\nAbdulkader Benchi is the JavaScript team leader at [Linagora](https://linagora.com/careers).\n",[232,727],{"slug":8062,"featured":6,"template":678},"migrating-your-jira-issues-into-gitlab","content:en-us:blog:migrating-your-jira-issues-into-gitlab.yml","Migrating Your Jira Issues Into Gitlab","en-us/blog/migrating-your-jira-issues-into-gitlab.yml","en-us/blog/migrating-your-jira-issues-into-gitlab",{"_path":8068,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8069,"content":8074,"config":8080,"_id":8082,"_type":16,"title":8083,"_source":17,"_file":8084,"_stem":8085,"_extension":20},"/en-us/blog/how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud",{"title":8070,"description":8071,"ogTitle":8070,"ogDescription":8071,"noIndex":6,"ogImage":2225,"ogUrl":8072,"ogSiteName":692,"ogType":693,"canonicalUrls":8072,"schema":8073},"Auto Deploy a GitLab.com project to Google Cloud","How to get started with our auto deploy feature using Google Kubernetes Engine.","https://about.gitlab.com/blog/how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Auto Deploy a GitLab.com project to Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dmitriy Zaporozhets\"}],\n        \"datePublished\": \"2017-08-10\",\n      }",{"title":8070,"description":8071,"authors":8075,"heroImage":2225,"date":8077,"body":8078,"category":14,"tags":8079},[8076],"Dmitriy Zaporozhets","2017-08-10","\n\nFor up-to-date information on GitLab's native integration with Google Kubernetes Engine, please visit the announcement blog post: [GitLab + Google Cloud Platform = simplified, scalable deployment](/blog/gke-gitlab-integration/).\n{: .alert .alert-gitlab-orange}\n\nOnce you write your code, the next question is, \"How do you deploy it?\" There are plenty of ways to do it, but none of them is perfect. You need to configure external tools, write your own scripts, and maybe even do manual command execution every time you want a new version deployed. At GitLab we believe deployment should be an essential part of workflows, like code review and CI. Several months ago we shipped an amazing feature, [auto deploy](https://docs.gitlab.com/ee/topics/autodevops/stages.html), that should take care of code deployment for you. Finally, I found some time to give it a try.\n\n\u003C!-- more -->\n\n## What is GitLab Auto Deploy?\n\nOriginally released in [8.15](/releases/2016/12/22/gitlab-8-15-released/#auto-deploy) and heavily improved since then, auto deploy deploys your application as part of a CI/CD pipeline within the GitLab user interface. This means you can set up an application to be deployed automatically every time a new commit lands on the `master` branch.\n\nAs per the documentation, the feature will package your application into a Docker image that will then be deployed to Kubernetes. GitLab has a container registry feature, so the Docker image will be stored within GitLab too.\n\nSetting up the GitLab auto deploy feature is a matter of clicking the \"Set up auto deploy\" button and applying the \"Kubernetes\" template to your `.gitlab-ci.yml` file. However, before this, you need to configure your GitLab project with valid credentials so it can access your cluster. That brings us to the point that you need your Kubernetes cluster first.
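\n\nIf you end up on Google Kubernetes Engine (see the next section for why I chose it), the cluster can also be created from the command line instead of the web interface. A rough sketch, where the cluster name, zone, and node count are placeholders:\n\n```\n# create a small test cluster (name, zone, and node count are made up)\ngcloud container clusters create my-test-cluster \\\n  --zone us-central1-a \\\n  --num-nodes 2\n\n# fetch credentials so kubectl can talk to the new cluster\ngcloud container clusters get-credentials my-test-cluster --zone us-central1-a\n```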
\n\n## Why Google Kubernetes Engine?\n\nThe documentation states, \"Google Kubernetes Engine is a managed environment for deploying containerized applications.\" It runs Kubernetes, and you can create your cluster with a few clicks in the web interface.\n\nThere are [a few other](https://kubernetes.io/docs/setup/pick-right-solution/) Kubernetes hosting solutions available on the market and you might prefer a different one, but as a newcomer I decided to go with Google for a few reasons:\n\n* It's number one on the list of proposed solutions in the Kubernetes docs.\n* It gives a nice free tier ($300 at the time of this blog post), which is enough for experiments.\n* Google originally started Kubernetes, so I expected some level of maturity from the service.\n\n## How to tie it all together\n\nIt took me some time to figure out the sequence of events and actions to make it work. This is the result:\n\n1. Create a GitLab.com project with a Dockerfile\n2. Create a cluster\n3. Copy the cluster credentials to the GitLab.com project\n4. Apply the auto deploy template to `.gitlab-ci.yml`\n\nAs a result, I have a [Ruby application](https://gitlab.com/dzaporozhets/minimal-ruby-app) that is built and deployed to staging automatically once I push code to the master branch. Additionally, I can manually deploy any pipeline to production with a single click.\n\nFor those who are new to Kubernetes but want to try GitLab auto deploy in action, I made a [quick start guide](https://docs.gitlab.com/ee/topics/autodevops/stages.html).\n\n[Cover image](https://unsplash.com/@jbcreate_?photo=eUMEWE-7Ewg) by [Joseph Barrientos](https://unsplash.com/@jbcreate_) on Unsplash\n{: .note}\n",[728,1204],{"slug":8081,"featured":6,"template":678},"how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud","content:en-us:blog:how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud.yml","How To Auto Deploy A Gitlab Dot Com Project To Google Cloud","en-us/blog/how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud.yml","en-us/blog/how-to-auto-deploy-a-gitlab-dot-com-project-to-google-cloud",{"_path":8087,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8088,"content":8094,"config":8100,"_id":8102,"_type":16,"title":8103,"_source":17,"_file":8104,"_stem":8105,"_extension":20},"/en-us/blog/git-wars-switching-to-gitlab",{"title":8089,"description":8090,"ogTitle":8089,"ogDescription":8090,"noIndex":6,"ogImage":8091,"ogUrl":8092,"ogSiteName":692,"ogType":693,"canonicalUrls":8092,"schema":8093},"Git Wars: Why I'm switching to GitLab","New GitLab user Christopher Watson puts us through our paces and weighs up his Git hosting options.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680411/Blog/Hero%20Images/git-wars-switching-to-gitlab.jpg","https://about.gitlab.com/blog/git-wars-switching-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git Wars: Why I'm switching to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christopher Watson\"}],\n        \"datePublished\": \"2017-07-19\",\n      }",{"title":8089,"description":8090,"authors":8095,"heroImage":8091,"date":8097,"body":8098,"category":14,"tags":8099},[8096],"Christopher Watson","2017-07-19","\n\n
It’s a well-known fact: GitHub has the market share when it comes to Git hosting, with Bitbucket following close behind due to their “unlimited private repositories” policy. But what if I told you that those weren’t your only options?\n\n\u003C!-- more -->\n\nNow, I have nothing against GitHub. It’s a great tool and I’ve been using it for years. It’s built primarily in a framework I love, Ruby on Rails, and its design is one we’ve all come to tolerate (if not love). With GitHub controlling most of the market share, most open source projects have also found a home there.\n\nAll of this being said, I’d be lying if I said that GitHub didn’t have its downsides. If you want private repositories, it’s going to cost you a pretty penny at $25 per month for your first five users, and then $9 per user after that. For comparison's sake, if you have 10 users in your organization, it’s going to cost you $70 a month, and that’s a pretty small team. GitHub also has a sordid history when it comes to communicating with the community, implementing new features, and updating its somewhat dated look.\n\nBitbucket is another beast altogether. Created by the tech conglomerate Atlassian, Bitbucket is their answer to GitHub. Its claim to fame is that you can have unlimited private repositories for free…as long as your team has no more than five people.\n\nThis is actually the reason I first started using Bitbucket. However, if you do have more than five people, you’re going to pay, and the quality that you pay for isn’t all that great. I’ve worked on a number of projects on Bitbucket where my team was plagued by slow pulls/pushes, 503 errors when attempting to view a repo, and just overall jankiness. Besides that, their design also leaves a lot to be desired. The new design looks better, but also somehow makes things even more confusing (someone needs to learn the difference between UI and UX). To be completely honest, I don’t have anything good to say about Bitbucket, so I’m going to move on.\n\n### So what’s this GitLab thing? Is it the answer to all of our Git hosting woes?\n\nWell, yes and no. GitLab is a very good product, but it’s not perfect. It certainly isn’t as fast as GitHub when it comes to pushing and pulling repos. That being said, here are my reasons for switching to GitLab for my personal projects: GitLab is a Git hosting solution with a very large toolset and, objectively, a beautifully designed website (could it still use some work? Yes, but I digress). It is completely free for unlimited users, unlimited private repositories, and full access to most of the awesome features they provide.\n\nYes, they still have paid tiers for the [enterprise](/enterprise/). You can’t expect them to keep an awesome project like this going without some kind of monetization, but for us little guys, you’ll most likely never have to pay. That has got to be music to your ears.\n\n### So you said it has “awesome features.” To what are you referring?\n\nWell, there’s quite a list. Let’s take a look:\n\n1. **Syntax themes!** In case you didn’t get that, I’ll say it again: syntax themes! This has been something that I have been waiting a long time for GitHub to come out with, but GitLab beat them to the punch. We’re still probably a long way away from having custom themes, but the ability to have a dark theme when checking diffs in the browser is awesome.\n\n1. **Registry:** GitLab also has a built-in Docker registry for your projects.
This is an amazingly powerful feature for those who want to keep their containers off the public registry at hub.docker.com but don’t want to pay for a private service.\n\n1. **Pipelines/GitLab CI:** Continuous integration is a huge time saver and a great way to make sure a pull request isn’t going to break your app. GitLab saves you from having to use an external CI service by having its own CI built right in. Not to say you can’t use an external CI if you want; GitLab has integrations for Jenkins, Bamboo, and more.\n\n1. **3rd Party Integrations:** As mentioned above, GitLab has 3rd party integrations for several services such as CI, code coverage, messaging, etc. Their Slack integration is great for notifying your team when stuff has been merged into master. I will be honest, though: I am sure GitHub has more integrations.\n\n1. **All the features that make GitHub great:** GitLab also ships with Wikis, Markdown-based readmes, etc. You don’t really lose any features by switching, but you gain a ton.\n\n![screengrab](https://about.gitlab.com/images/blogimages/git-wars-2.png){: .shadow}\u003Cbr>\n\n### So if it’s so great, why isn’t everyone using it?\n\nThere are a couple of answers to that question. First off, you have the market share factor. GitHub was one of the first Git hosting providers to market, and they’ve managed to hold onto that lead. That means that if you want people to contribute to your project, it helps to have it on GitHub, because chances are the people you want to contribute already have an account.\n\nThe other answer is related: comfort. People are simply comfortable with the tool they know, and a lot of people aren’t like me (willing to throw everything out the window because I truly believe that the better product should get my business). This is the same reason so many people are still using Atlassian products. It’s definitely not because of their user interfaces.\n\n### So where should I go from here?\n\nThat depends on you. If you’re comfortable getting to know a new way of doing things, I’d suggest you take a look at GitLab. It really is worth the time you’ll put into it.\n\nIf you’re already *comfortable*, then go ahead and stick with what you know, but at least now you know that there are alternatives.\n\n## About the Author\n\n[Chris Watson](https://twitter.com/idev0urer) is a freelance full-stack developer who occasionally enjoys sharing some of his many opinions with the world.
He and his wife currently reside in sunny Arizona.\n\n_This post was originally published on [blog.cwatsondev.com](https://blog.cwatsondev.com/git-wars-why-im-switching-to-gitlab/)._\n\n“[paper battle](https://www.flickr.com/photos/die_ani/9024130/)” by [anika](https://www.flickr.com/photos/die_ani/) is licensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[702,703,4440],{"slug":8101,"featured":6,"template":678},"git-wars-switching-to-gitlab","content:en-us:blog:git-wars-switching-to-gitlab.yml","Git Wars Switching To Gitlab","en-us/blog/git-wars-switching-to-gitlab.yml","en-us/blog/git-wars-switching-to-gitlab",{"_path":8107,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8108,"content":8114,"config":8121,"_id":8123,"_type":16,"title":8124,"_source":17,"_file":8125,"_stem":8126,"_extension":20},"/en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia",{"title":8109,"description":8110,"ogTitle":8109,"ogDescription":8110,"noIndex":6,"ogImage":8111,"ogUrl":8112,"ogSiteName":692,"ogType":693,"canonicalUrls":8112,"schema":8113},"How we use GitLab at the Province of Nova Scotia","The Unix operations team at the Province of Nova Scotia decided to implement GitLab for source control and CI/CD. Here's how we started exploring DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670226/Blog/Hero%20Images/how-we-use-gitlab-at-nova-scotia.jpg","https://about.gitlab.com/blog/how-we-use-gitlab-at-the-province-of-nova-scotia","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use GitLab at the Province of Nova Scotia\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steven Zinck\"},{\"@type\":\"Person\",\"name\":\"Paul Badcock\"}],\n        \"datePublished\": \"2017-07-18\",\n      }",{"title":8109,"description":8110,"authors":8115,"heroImage":8111,"date":8118,"body":8119,"category":14,"tags":8120},[8116,8117],"Steven Zinck","Paul Badcock","2017-07-18","\n\nIn 2015 the Unix operations team at the Province of Nova Scotia decided to implement GitLab for source control and [Continuous Integration and Continuous Deployment](/solutions/continuous-integration/). This was the beginning of our foray into DevOps practices. This article describes our automated testing, integration and release of Puppet code.\n\n\u003C!-- more -->\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/devops-infinity-graphic.png){: .shadow}\u003Cbr>\n\nYou can also learn more about our DevOps transformation by watching our recent interview:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/SHdeqznJXbc\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\n### Source control\n\nA source control management (SCM) system allows the user to “commit” code, documentation and other system artifacts such as configuration files to a central location. Each change results in a new version of the file, and previous versions of the file remain available on the SCM. Restoring a previous version is quick and easy.\n\nWe needed a way for multiple sysadmins to be able to work on code without colliding with one another. We also needed a way to vet changes through a peer review process. GitLab makes this easy thanks to its support of branching and merge requests. 
Branching allows a sysadmin to create an individual copy of the production code (“master”) and work with it in isolation — this lets multiple team members work on the same production code base without being concerned about conflicts between their work.\n\n### Continuous integration\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/ci-cd-workflow.png){: .shadow}\u003Cbr>\n\nAs we built out more of our infrastructure with Puppet, we needed an automated way of testing our code. Over time, our test strategy has evolved to include automated [syntax checking](https://puppet.com/blog/verifying-puppet-checking-syntax-and-writing-automated-tests), [linting](http://puppet-lint.com/), [unit](https://puppet.com/blog/unit-testing-rspec-puppet-for-beginners) and [integration](http://serverspec.org/) tests. Manual testing was not sufficient, as it was often forgotten about and was very time-consuming. Automated testing solved that — for every code commit, the test pipeline is executed. A complete test cycle currently takes under five minutes.\n\nOn each code commit to a branch other than master, the following test pipeline is kicked off by GitLab CI:\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/ci-screenshot.png){: .shadow}\u003Cbr>\n\nIf at any point a job fails, the pipeline stops and the sysadmin is notified. One of the great features of GitLab CI is its tight integration with Docker — each of the jobs above is run inside its own isolated container. The syntax-lint-spec job verifies that the Puppet syntax is good; linting confirms the code conforms to best practices; and spec confirms that logically the code functions as designed.\n\nThe test-kitchen jobs are a full suite of [ServerSpec](http://serverspec.org/) tests. We automatically provision four containers that represent our four most common configurations. Our Puppet code is applied to each container to verify that it will work in our production environment. This acts as a full regression test each time a code commit is made, and ensures that there were no unintended problems introduced. It gives us confidence that the code is actually doing what it’s intended to do.\n\n### Continuous deployment\n\nOnce all of the tests pass, the sysadmin can submit a merge request for their branch, and it will be reviewed by a senior staff member before reaching production. This is an important part of our workflow, because it gives junior staff the confidence that a more senior member of the team will review and approve a change before it reaches any of our servers. If the merge request is accepted, the branch will be merged into master, and at that point GitLab CI will push the code to our Red Hat Satellite and Puppet Enterprise servers, where it will be deployed to our environment.\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/cd-screenshot.jpeg){: .shadow}\u003Cbr>\n\nYou can find the configuration files (Dockerfiles, .kitchen.yml, .gitlab-ci.yml, and the Satellite push script) on our [GitHub](https://github.com/nsgov).\n\nThe implementation of our system automation strategy and the toolset we selected have proven themselves many times. We are spending less time fighting fires due to the streamlined and tested nature of our deployments and have earned the confidence of our clients.
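\n\nFor readers who want to reproduce a similar pipeline, the individual stages boil down to commands like these. This is only a sketch: the manifest path and the use of the standard Rake `spec` task are assumptions about a typical Puppet repository, not our exact configuration:\n\n```\n# syntax check and lint the Puppet code\npuppet parser validate manifests/site.pp\npuppet-lint manifests/\n\n# run the rspec-puppet unit tests (assumes a standard Rakefile)\nbundle exec rake spec\n\n# converge and verify the integration suites defined in .kitchen.yml\nkitchen test\n```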
\n\n### The road ahead\n\nIn upcoming articles, we’ll write about the CI/CD process we built with [Communications Nova Scotia](https://novascotia.ca/cns/) that allows their development team to deploy and roll back their Dockerized application environment on demand. We also plan to write about our automated test strategy for Red Hat Ansible.\n\nThis post originally appeared on [*Medium*](https://medium.com/@szinck/how-we-use-gitlab-at-the-province-of-nova-scotia-708b514cc47f).\n\n## About the Guest Authors\n\n[Steve Zinck](https://www.linkedin.com/in/stevezinck/) spent most of his career working in the Public Service as a Unix and Infrastructure administrator. Over the past few years, he's started to transition away from traditional systems administration and begun to focus on software delivery and automation. As part of that transition, his team has implemented GitLab at the core of their automation and software delivery stack. His current focus is working with software and application teams to assist in streamlining their deployment and delivery process.\n\n[Paul Badcock](https://www.linkedin.com/in/pbadcock/?ppe=1) started working in the IT sector in 1998, with positions ranging from small startups to large Fortune 500 companies to, currently, a public-sector team. He worked as a traditional IT Linux administrator until the mid-2000s, when he started focusing on adopting development tooling, practices, and methodologies for operational teams. This work culminated in implementing a DevOps workplace framework in the early 2010s with the help of @stewbawka, and he has worked with like-minded teams since. As part of adopting developer tools, he previously worked with and managed CVS and SVN installations and various vendor products before reading a “Show HN” posting on Hacker News about GitLab.\n",[894,110,727],{"slug":8122,"featured":6,"template":678},"how-we-use-gitlab-at-the-province-of-nova-scotia","content:en-us:blog:how-we-use-gitlab-at-the-province-of-nova-scotia.yml","How We Use Gitlab At The Province Of Nova Scotia","en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia.yml","en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia",{"_path":8128,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8129,"content":8135,"config":8140,"_id":8142,"_type":16,"title":8143,"_source":17,"_file":8144,"_stem":8145,"_extension":20},"/en-us/blog/redesigning-gitlabs-navigation",{"title":8130,"description":8131,"ogTitle":8130,"ogDescription":8131,"noIndex":6,"ogImage":8132,"ogUrl":8133,"ogSiteName":692,"ogType":693,"canonicalUrls":8133,"schema":8134},"Redesigning GitLab's navigation","After a series of research and brainstorming sessions, we are excited to share with the community our redesign of GitLab's navigation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679771/Blog/Hero%20Images/redesign-navigation-cover-image.jpg","https://about.gitlab.com/blog/redesigning-gitlabs-navigation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Redesigning GitLab's navigation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taurie Davis\"}],\n        \"datePublished\": \"2017-07-17\",\n      }",{"title":8130,"description":8131,"authors":8136,"heroImage":8132,"date":8137,"body":8138,"category":14,"tags":8139},[7784],"2017-07-17","\n\nAt GitLab,
we are taking big steps towards refining our interface in an effort to make the [idea to production](/learn/) workflow more productive. After a series of research and brainstorming sessions, we are excited to share with the community our redesign of GitLab's navigation.\n\n\u003C!-- more -->\n\n## Research and insight\n\nBack in March, we began the first of three rounds of user testing related to our navigation architecture. We knew from previous feedback that navigating GitLab's features was complex, difficult, and could be drastically improved. One of the major pitfalls we discovered in [this round](https://gitlab.com/gitlab-org/gitlab-ce/issues/29878) was new users' confusion between global content and contextual content when navigating. This was a large source of frustration when trying to find projects or distinguish between a personal space and memberships.\n\n- **Global content** refers to the elements that are always available to you. Example: Your projects, issues, merge requests, and explore sections.\n\n- **Contextual content** refers to the elements that change based on the page you are viewing. Example: The content of an individual group page vs. a project page.\n\nUsing the data we gathered from our first round of testing, we began putting together common themes. This allowed us to gather questions and assumptions, which informed the next [prototype](https://gitlab-org.gitlab.io/gitlab-design/hosted/pedro/ux-research-4-navigation-usability-test-prototype-html-previews/#/screens/226216190) we would test with. Our research team created a script that would walk users through a number of tasks. This script was then used to help us validate the assumptions we had made in our brainstorm session.\n\n{: .text-center}\n![Script](https://about.gitlab.com/images/blogimages/redesigning-gitlabs-navigation/script.png){: .shadow}\n\nWe [wrote up our findings from this round](https://gitlab.com/gitlab-org/ux-research/issues/5), identifying problem areas through the insight we gained by watching real users interact with our prototype. The team then set out to create a new design that addressed the interaction flaws we had identified during our testing.\n\n## Further discovery\n\nThrough a series of brainstorm sessions, the team created two new prototypes to use in testing. During [this round of testing](https://gitlab.com/gitlab-org/ux-research/issues/7), six users were shown [Prototype A](https://gitlab-org.gitlab.io/gitlab-design/hosted/chris/ux-research-7-prototype--vertical-breadcrumbs.framer/) and another six were shown [Prototype B](https://gitlab-org.gitlab.io/gitlab-design/hosted/chris/ux-research-7-prototype--horizontal-breadcrumbs.framer/). Each prototype used the same series of tasks, allowing us to track usability issues in each prototype individually, as well as compare the average time taken per task.\n\n**Prototypes A and B**\n![Prototype B](https://about.gitlab.com/images/blogimages/redesigning-gitlabs-navigation/prototypes.gif){: .shadow}\n\nDuring this third round of testing, we discovered that the majority of users were able to distinguish their global content from their contextual content after a few completed tasks. This was an improvement over our previous research, which showed that users could finish a usability testing session without gaining an understanding of global vs.
contextual content.\n\n## A shippable product\n\nFrom the data we gathered in our third and final round of testing, we began putting together a design that could be shipped in our 9.4 release. We took the most successful aspects of both prototypes and created a mockup that addressed the major pain points we discovered during all of our testing.\n\nWe knew from previous feedback and from our research that including the global navigation links on the top navigation bar was superior to hiding them behind a hamburger menu. Openly displaying them in the top navigation allows users to quickly and easily access the information they revisit often.\n\n![Sidebar](https://about.gitlab.com/images/blogimages/redesigning-gitlabs-navigation/navigation-global-links-sidebar.png){: .shadow}\n\nWe also learned that including a standard breadcrumb menu helped users orient themselves. It became much easier for new and seasoned users to understand where they were located and navigate up a level.\n\n![Breadcrumb](https://about.gitlab.com/images/blogimages/redesigning-gitlabs-navigation/navigation-global-links--longer-project.png){: .shadow}\n\nKey experience decisions like these gave us a base for finalizing the foundation of the redesign. Afterwards, we began to take a closer look at the interface, add color, and define different states. Color played a key role in not only giving GitLab its own look and feel, but also further differentiating the global content from the contextual. We worked through [many iterations](https://gitlab.com/gitlab-org/gitlab-ce/issues/34402) until we nailed down an interface that used color as a guide and not as a distraction.\n\n![Final redesign](https://about.gitlab.com/images/blogimages/redesigning-gitlabs-navigation/final.png){: .shadow}\n\n## Continuing to iterate\n\nWe are excited to share our progress with the community and even more excited that this is just the beginning. There are a number of improvements that we are working on in order to further improve our navigation. A few of these features include:\n\n- Adding a fly-out dropdown to the contextual navigation, making it easier and faster to reach submenu items. [#34026](https://gitlab.com/gitlab-org/gitlab-ce/issues/34026)\n- Making the contextual sidebar collapsible to allow for more screen real estate. [#34028](https://gitlab.com/gitlab-org/gitlab-ce/issues/34028)\n- Adding multiple color palette options for differentiating instances. [#35012](https://gitlab.com/gitlab-org/gitlab-ce/issues/35012)\n- Adding content to global navigation dropdowns to allow easier access to recent projects and groups. [#35010](https://gitlab.com/gitlab-org/gitlab-ce/issues/35010)\n- Adding the contextual navigation on mobile devices. [#34036](https://gitlab.com/gitlab-org/gitlab-ce/issues/34036)\n\nYou can see our current list of issues planned for 9.5 in our [Meta: Global and contextual navigation](https://gitlab.com/gitlab-org/gitlab-ce/issues/32794) issue.\n\n## Try it yourself\n\nWe know that a UI change as large as a navigation redesign can be disruptive to workflow and habits, but we hope that you will find GitLab much easier to navigate in 9.4! We actively worked to turn research and analysis into insight that would inform a more productive navigation architecture for GitLab. We have a number of improvements to make, and are including a way to turn the new navigation on and off while we continue to gather feedback and iterate in the next release.
To turn the new navigation on, click your user profile dropdown and select \"Turn on new navigation\" or visit [your user preferences](https://gitlab.com/profile/preferences#new-navigation).\n\n{: .text-center}\n![Turn on new nav](https://about.gitlab.com/images/blogimages/redesigning-gitlabs-navigation/turn-on-nav.png){: .shadow}\n\n## Feedback\n\nAfter several rounds of UX research and taking into account the feedback received from the community, we believe we have a UX solution that greatly improves navigating GitLab. In addition to the roll out in 9.4 and the scheduled improvements for 9.5, we have created a [feedback issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/34917) to collect, track, and act upon further feedback from the community. We would love to hear your thoughts so don't hesitate to leave us a comment below or in the issue!\n",[1144],{"slug":8141,"featured":6,"template":678},"redesigning-gitlabs-navigation","content:en-us:blog:redesigning-gitlabs-navigation.yml","Redesigning Gitlabs Navigation","en-us/blog/redesigning-gitlabs-navigation.yml","en-us/blog/redesigning-gitlabs-navigation",{"_path":8147,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8148,"content":8154,"config":8160,"_id":8162,"_type":16,"title":8163,"_source":17,"_file":8164,"_stem":8165,"_extension":20},"/en-us/blog/making-ci-easier-with-gitlab",{"title":8149,"description":8150,"ogTitle":8149,"ogDescription":8150,"noIndex":6,"ogImage":8151,"ogUrl":8152,"ogSiteName":692,"ogType":693,"canonicalUrls":8152,"schema":8153},"Making CI/CD easier with GitLab","The team at Trek10 tries to consider the need for automation and repeatability with everything they do. One team member gives a crash course in GitLab CI/CD and explains how they use it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680423/Blog/Hero%20Images/making-ci-easier-with-gitlab.jpg","https://about.gitlab.com/blog/making-ci-easier-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Making CI/CD easier with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rob Ribeiro\"}],\n        \"datePublished\": \"2017-07-13\",\n      }",{"title":8149,"description":8150,"authors":8155,"heroImage":8151,"date":8157,"body":8158,"category":14,"tags":8159},[8156],"Rob Ribeiro","2017-07-13","\n\nAt [Trek10](https://www.trek10.com/), we always try to consider the need for automation and repeatability with everything that we do. That’s why we focus on using tools like CloudFormation, [Serverless](/topics/serverless/), and CI/CD, as well as building other tools. Recently, I was tasked with doing various maintenance tasks on a number of internal tools/projects. Some needed upgrades from Node.js 0.10, some needed code fixes, and most needed CI/CD. Today, we’re just going to focus on the CI/CD part.\n\n\u003C!-- more -->\n\nIn spite of my past experience with Jenkins and TeamCity and our team’s experience with AWS (CodePipeline/CodeDeploy), I chose [GitLab CI/CD](/topics/ci-cd/) to standardize these projects. The biggest reason for this choice is history. As a project evolves, its CI/CD configuration may change. If you ever need to go back in time, you may have difficulty deploying again. Since GitLab CI/CD is based on a `.gitlab-ci.yml` config file that is committed with the code, as long as a commit built and deployed then, it stands a pretty good chance of building and deploying now. 
Being able to tweak CI/CD without leaving my editor was an additional bonus.\n\n### Crash course in GitLab CI/CD\n\nGitLab CI/CD relies on having a `.gitlab-ci.yml` file in the root of your repo. CI/CD for each commit is run against the `.gitlab-ci.yml` that is current for that commit. The fundamental unit of CI/CD for GitLab is a “job”. A job is a construct that runs a bash script against a commit in a particular context. You might have one job to run tests, other jobs to build for staging or production, and other jobs to deploy to particular environments. In the config file, jobs are represented by top-level maps (aka “objects”) that are not otherwise “reserved” GitLab CI/CD maps. Examples of reserved top-level maps:\n\n* `image` (the Docker image in which your jobs run)\n* `services` (other Docker images that need to run while your jobs run)\n* `before_script` (runs before every `script`)\n* `after_script` (runs after every `script`)\n* `stages` (redefines the stage names and order)\n* `variables` (variables available to all jobs)\n* `cache` (controls what is cached between CI/CD runs; good for stuff from your package manager)\n\nEvery job must belong to a stage (if left out, `test` is the default). Stages are run in a sequence, and all of the jobs in a stage run with the maximum parallelism available. The default stage sequence is: `build`, `test`, `deploy`. Each job also has its own `before_script`, `after_script`, `variables`, and `cache`. Defining these at the job level will override the top-level configuration. The most important of these is `variables`, because your variables are what make the production deploy job’s context different from the staging deploy job’s context. `variables` is just a map with a bunch of key-value pairs. Variables are consumed with a syntax similar to bash: `${myVar}`. There are some limitations that you should know about:\n\n* Variables do not support bash variable expansions, substitutions, defaults, etc.\n* Variables do not recurse or have a sense of order of evaluation, but top-level variables can be used in job-level variables. See the following examples:\n\n```\n# You CANNOT do this (referencing a sibling variable in the same map)\nvariables:\n    PROD_STAGE_NAME: prod\n    PROD_URL: https://thisismywebsite.com/${PROD_STAGE_NAME}\n```\n\n```\n# You CAN do this (referencing a top-level variable from a job's variables map)\nvariables:\n    PROD_STAGE_NAME: prod\n\nmy_job:\n    variables:\n        STAGE_NAME: ${PROD_STAGE_NAME}\n```\n\n```\n# But you CANNOT do something like this (nested variables)\nvariables:\n    CURRENT_STAGE: PROD\n    PROD_STAGE_NAME: prod\n\nmy_job:\n    variables:\n        STAGE_NAME: ${${CURRENT_STAGE}_STAGE_NAME}\n```\n\nThat last example gives us a ton of power. We’ll be sure to abuse that as we go.\n\nAs mentioned before, jobs run a bash script in a context. So every job must have a `script`. The last big thing that you need is “flow control”. By default, a job will run on every commit. Using the `only`, `except`, and `when` keys allows you to control how jobs are triggered. `only` and `except` accept the following options:\n\n* Branch names, e.g.
`master` or `develop`\n* Tag names\n* JS-style RegExp literals to evaluate against branch/tag names\n* These special keywords: `api`, `branches`, `external`, `tags`, `pushes`, `schedules`, `triggers`, and `web`\n* Using `branches` and `tags` with `only` causes a job to be run for every branch or tag, respectively\n* Repo path filters to deal with repo forks\n\nOne more important fact: jobs whose names start with a period character are disabled, e.g. `.my_disabled_job`\n\nThat should be enough to get us started. You can find more [GitLab CI/CD documentation here](https://docs.gitlab.com/ee/ci/). The most useful bit is the `.gitlab-ci.yml` reference found [here](https://docs.gitlab.com/ee/ci/yaml/).\n\nAs with any new tool, I got to read and re-read the documentation and make some mistakes getting things right. By the time I was knee-deep in this, I realized there was a need to prevent anyone from having to do this again, myself included. The solution requires two things: a well-designed CI/CD template and a way to get that template into all of your new repositories. Let’s tackle template design next.\n\n### Designing a template\n\nThis part is hard to talk about in a completely generic manner. Instead, let’s walk through our use case. Looking at our projects past and present, I could usually bet on these characteristics:\n\n* Deploys to AWS (we are an AWS consultancy after all…)\n* Uses Serverless framework with Node.js or Python\n* May deploy production to multiple regions\n* May deploy different stages to different accounts\n\nIn addition, I realized that I needed these other options:\n\n* May need to “disable” dev/staging from doing real work\n* May want one dev environment per branch\n\nFinally, we decided on the following deployment strategy:\n\n* Production deploys via tags on `master`\n* Staging deploys on commits/merges to `master`\n* Dev deploys should work for all other branches (we’re not going to implement this one in this post)\n\nMy roots are as a software developer, so making things reusable is a core skill at this point. A good template is going to make it super easy for the intended cases and be fairly adaptable for other uses. Here is the goal:\n\n* One script per stage. That means only one test script, one build script, and one deploy script. Oh, and keep it DRY.\n* Jobs should be as similar as possible, and differences should be tweaked by top-level variables.\n\nLet’s focus on that single script per stage. We’re not going to cover how to write the deployment script itself; instead, we’ll focus on the deploy stage. Let’s say we start with a deployment job like this:\n\n```\ndeploy:production:\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME} ${REGION}\n    variables:\n        DEPLOYMENT_ROLE: arn:aws:iam::1234567890:role/gitlab-ci-deployment\n        STAGE_NAME: prod\n        REGION: us-east-1\n        ACCOUNT: \"1234567890\"\n    only:\n        - tags\n```\n\nNow we could copy and tweak this for staging and dev, but that’s not what we’re after.
First, let’s break the script off into a reusable chunk and use it in our staging deploy:\n\n```\n.deployment_script: &deployment_script\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME} ${REGION}\n\ndeploy:production:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: arn:aws:iam::1234567890:role/gitlab-ci-deployment\n        STAGE_NAME: prod\n        REGION: us-east-1\n        ACCOUNT: \"1234567890\"\n        PRODUCTION: \"true\"\n    only:\n        - tags\n\ndeploy:staging:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: arn:aws:iam::0987654321:role/gitlab-ci-deployment\n        STAGE_NAME: staging\n        REGION: us-east-1\n        ACCOUNT: \"0987654321\"\n    only:\n        - master\n```\n\nUsing YAML anchors and references, we can inject the script into all of our deployment jobs. Notice that the shared `.deployment_script` job is disabled (its name starts with a period). This is because we don’t want it to run in parallel with all of our intended jobs. We also added a `PRODUCTION` environment variable to just the production deploy so our script can pick that up too. If your code knows about this variable, you can use it to turn production-only features on or off. Now, we can make this cleaner and easier for our developers by pulling all of the `variables` into a top-level variables map at the top of the file:\n\n```\nvariables:\n    PROD_ACCOUNT: \"1234567890\"\n    PROD_STAGE_NAME: prod\n    PROD_REGION: us-east-1\n    STAGING_ACCOUNT: \"0987654321\"\n    STAGING_STAGE_NAME: staging\n    STAGING_REGION: us-east-1\n\n.deployment_script: &deployment_script\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME}, ${REGION}, and ${ACCOUNT}\n\ndeploy:production:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: \"arn:aws:iam::${PROD_ACCOUNT}:role/gitlab-ci-deployment\"\n        STAGE_NAME: ${PROD_STAGE_NAME}\n        REGION: ${PROD_REGION}\n        ACCOUNT: ${PROD_ACCOUNT}\n        PRODUCTION: \"true\"\n    only:\n        - tags\n\ndeploy:staging:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: \"arn:aws:iam::${STAGING_ACCOUNT}:role/gitlab-ci-deployment\"\n        STAGE_NAME: ${STAGING_STAGE_NAME}\n        REGION: ${STAGING_REGION}\n        ACCOUNT: ${STAGING_ACCOUNT}\n    only:\n        - master\n```\n\nNow, that’s looking more reusable, and we have accomplished our second goal of making the jobs very similar and controlled by top-level variables. This makes it easy for anyone who fits the template’s use case perfectly to reuse it.
We could easily add the dev environment, but we’ll skip that in favor of illustrating multi-region production deploys:\n\n```\nvariables:\n    PROD_ACCOUNT: \"1234567890\"\n    PROD_STAGE_NAME: prod\n    PROD1_REGION: us-east-1\n    PROD2_REGION: us-west-2\n    STAGING_ACCOUNT: \"0987654321\"\n    STAGING_STAGE_NAME: staging\n    STAGING_REGION: us-east-1\n\n.deployment_script: &deployment_script\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME}, ${REGION}, and ${ACCOUNT}\n\n.production_variables: &production_variables\n    DEPLOYMENT_ROLE: \"arn:aws:iam::${PROD_ACCOUNT}:role/gitlab-ci-deployment\"\n    STAGE_NAME: ${PROD_STAGE_NAME}\n    ACCOUNT: ${PROD_ACCOUNT}\n    PRODUCTION: \"true\"\n\ndeploy:production_1: &deploy_production\n    \u003C\u003C: *deployment_script\n    variables:\n        \u003C\u003C: *production_variables\n        REGION: ${PROD1_REGION}\n    only:\n        - tags\n\ndeploy:production_2:\n    \u003C\u003C: *deploy_production\n    variables:\n        \u003C\u003C: *production_variables\n        REGION: ${PROD2_REGION}\n\ndeploy:staging:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: \"arn:aws:iam::${STAGING_ACCOUNT}:role/gitlab-ci-deployment\"\n        STAGE_NAME: ${STAGING_STAGE_NAME}\n        REGION: ${STAGING_REGION}\n        ACCOUNT: ${STAGING_ACCOUNT}\n    only:\n        - master\n```\n\nNotice that we have changed the job names to reflect having multiple regions, and that the shared production settings now live in a hidden `.production_variables` map with its own `&production_variables` anchor. In addition, we are making use of YAML anchors and references to copy the entire `deploy:production_1` job into `deploy:production_2`, and then we just override the `REGION` variable. This makes adding additional regions super easy.\n\nWhat’s more useful at this point is that, as long as you have made your script flexible enough, you can now distribute this to your development team as a template. If their project fits the script and configuration perfectly, they should just have to fill in the correct values for the top-level variables and go. For those needing something different, they should hopefully be able to just tweak the script. Now, we just need to solve the problem of making sure that they actually use the template…\n\n### Automatic CI/CD injection with GitLab and AWS Lambda\n\nI was inspired by GitHub’s option to select a .gitignore and license during the repo creation process. What if we could have that for CI? Forking GitLab and figuring out how to hack this in did not sound like a quick or easy thing to do. However, after a little research, I found that we could use a system hook to trigger a Lambda that could inject the desired template via the commit API.\n\n
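In outline (a sketch only: the host, token handling, and project ID are illustrative, and the open-sourced tool linked below does the real work), the Lambda reacts to a `project_create` system hook and commits the template through the repository files API:\n\n```\n# sketch: assumes an admin API token is available in the Lambda's environment\n# triggered by a project_create system hook for, say, project ID 42\ncurl --request POST \\\n    --header \"PRIVATE-TOKEN: ${GITLAB_ADMIN_TOKEN}\" \\\n    --header \"Content-Type: application/json\" \\\n    --data '{\"branch\": \"master\", \"commit_message\": \"Add CI/CD template\", \"content\": \"...\"}' \\\n    \"https://gitlab.example.com/api/v4/projects/42/repository/files/.gitlab-ci.yml\"\n```\n\n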
This part is not as interesting to read about, so we did one better: we have open sourced this tool so you can deploy it in your environment. Check out the repo [here](https://github.com/trek10inc/gitlab-boilerplate-injector). And if you’re looking for someone to help you implement these and other awesome automations and AWS solutions, we would love to talk to you. Feel free to reach out to us at info@trek10.com for more. Thanks for reading!\n\n## About the Guest Author\n\nRob has spent his career honing his interpersonal, technical, and problem-solving skills. He spent five years in customer service and management, followed by over five years in software development and consulting. He has experience working and consulting for everything from startups to Fortune 500 enterprises in a variety of industries including manufacturing, healthcare, and finance. Rob has earned an MS in Applied Mathematics and Computer Science from Indiana University and a BS in Pharmaceutical Sciences from Purdue University.\n",[110,4440],{"slug":8161,"featured":6,"template":678},"making-ci-easier-with-gitlab","content:en-us:blog:making-ci-easier-with-gitlab.yml","Making Ci Easier With Gitlab","en-us/blog/making-ci-easier-with-gitlab.yml","en-us/blog/making-ci-easier-with-gitlab",{"_path":8167,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8168,"content":8174,"config":8180,"_id":8182,"_type":16,"title":8183,"_source":17,"_file":8184,"_stem":8185,"_extension":20},"/en-us/blog/dockerizing-review-apps",{"title":8169,"description":8170,"ogTitle":8169,"ogDescription":8170,"noIndex":6,"ogImage":8171,"ogUrl":8172,"ogSiteName":692,"ogType":693,"canonicalUrls":8172,"schema":8173},"Dockerizing GitLab Review Apps","A GitLab user shows us how to deploy Docker containers as a Review App.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680430/Blog/Hero%20Images/dockerizing-review-apps.jpg","https://about.gitlab.com/blog/dockerizing-review-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Dockerizing GitLab Review Apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stephan Hochdörfer\"}],\n        \"datePublished\": \"2017-07-11\",\n      }",{"title":8169,"description":8170,"authors":8175,"heroImage":8171,"date":8177,"body":8178,"category":14,"tags":8179},[8176],"Stephan Hochdörfer","2017-07-11","\n\nLast year GitLab introduced the [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) feature. Review Apps are app environments that are created dynamically every time you push a new branch up to GitLab. As a bonus point, the app environments are automatically deleted when the branch is deleted. Since we moved to using Docker for quite a few of our projects, I was keen on figuring out how to combine Docker and the GitLab Review Apps functionality, as the documentation only mentions NGINX as a way to run Review Apps. As it turns out, it is rather simple to deploy Docker containers as a Review App.\n\n\u003C!-- more -->\n\nIn our scenario, the GitLab Runner for building the Docker image and the GitLab Runner for \"running\" the Review Apps make use of the shell executor; that way, we do not have to deal with Docker-in-Docker issues. Besides installing the gitlab-ci-multi-runner package, we also installed Docker and docker-compose.\n\nFirst of all, we define two build stages in the .gitlab-ci.yml file – the build and deploy stages:\n\n```yaml\nstages:\n  - build\n  - deploy\n```\n\nThe build stage is defined like this:\n```yaml\nbuild:\n  tags:\n    - php7\n  stage: build\n  script:\n    - echo \"Building the app\"\n    - composer.phar install\n    - docker build -t myproject/myapp .\n    - docker tag myproject/myapp:latest \\\n      registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n    - docker push registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n  only:\n  - master\n```\n\nThis will create the Docker image and push it to our Sonatype Nexus instance, which serves as a private Docker registry for us. As you can see, I make use of the $CI_COMMIT_REF_NAME variable when tagging the Docker image. That way, we end up with a Docker image per branch. Downside: you cannot use characters in the branch name which are not valid in Docker tags. I still need to figure out a fix for this.\n\n
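As a possible direction (a sketch, not part of the original setup), one could normalize the ref into a tag-safe string before tagging and pushing:\n\n```yaml\nscript:\n  # sketch: the sed mapping and 128-character cap are illustrative, not from the original post\n  - SAFE_REF=$(echo \"$CI_COMMIT_REF_NAME\" | sed 's/[^a-zA-Z0-9_.-]/-/g' | cut -c1-128)\n  - docker tag myproject/myapp:latest registry.loc/myproject/myapp:$SAFE_REF\n  - docker push registry.loc/myproject/myapp:$SAFE_REF\n```\n\n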
The deploy stage consists of two jobs: one for deploying the container, the other for undeploying the container:\n\n```yaml\ndeploy_dev:\n  tags:\n    - dev\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  script:\n    - echo \"Deploy to dev.loc\"\n    - docker pull registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n    - docker stop reviewapp-demo-$CI_COMMIT_REF_NAME || true\n    - docker rm reviewapp-demo-$CI_COMMIT_REF_NAME || true\n    - docker run -d -P -l traefik.enable=true \\\n      -l traefik.frontend.rule=Host:reviewapp.dev.loc \\\n      -l traefik.protocol=http --name reviewapp-demo-$CI_COMMIT_REF_NAME \\\n      registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n  environment:\n    name: dev\n    url: http://reviewapp.dev.loc\n  only:\n  - master\n```\n\nWhen this code is run, it will simply pull the latest image from the private Docker registry and run it. Since the gitlab-runner user will push the image to the registry, the user needs an account there and needs to be authenticated against the registry. I could not find a way to configure the registry credentials via the .gitlab-ci.yml file, so I ssh'ed into the boxes and manually ran a \"docker login registry.loc\" for the gitlab-runner user. Currently we do not have many servers - virtual machines in our case - so that approach is fine for now, but it does not scale.\n\nWhen running the container, we set a fixed name for it. That way, we can easily stop it when it comes to the undeploy job. We also define some Traefik labels, as we use Traefik in front of the Docker daemon to route the requests. Traefik itself runs in a container as well. The Traefik container is launched like this:\n\n```bash\ndocker run -d --restart=always -p 8080:8080 -p 80:80 -p 443:443 \\\n-l traefik.enable=false --name=traefik \\\n-v /var/run/docker.sock:/var/run/docker.sock \\\n-v /etc/traefik/traefik.toml:/etc/traefik/traefik.toml \\\n-v /etc/traefik/ssl/cert.key:/etc/traefik/ssl/cert.key \\\n-v /etc/traefik/ssl/cert.pem:/etc/traefik/ssl/cert.pem \\\ntraefik\n```\n\nWe do not use any fancy Traefik configuration, just the defaults for the Docker backend. Since the Review Apps server runs in our intranet and uses our intranet domain name, we were not able to use the Let's Encrypt support built into Traefik. Instead, we had to generate a self-signed SSL certificate and mount it into the Traefik container.\n\nThe undeploy job is the final piece of the puzzle. GitLab allows you to manually stop Review Apps by clicking a Pause button in the GitLab UI. To undeploy a Review App, we simply stop and remove the container by its defined name.\n\n```yaml\nundeploy_dev:\n  tags:\n    - dev\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  script:\n    - echo \"Remove review app from dev.loc\"\n    - docker stop reviewapp-demo-$CI_COMMIT_REF_NAME || true\n    - docker rm reviewapp-demo-$CI_COMMIT_REF_NAME || true\n  when: manual\n  environment:\n    name: dev\n    action: stop\n```\n\nBoth the deploy_dev and undeploy_dev jobs are bound by the tag \"dev\" to the dev server which hosts our Docker instances. That way, the Docker instances will always start on the right server.\n\n
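As an aside, one way to avoid the manual docker login step mentioned earlier (a sketch we have not battle-tested; the variable names are illustrative) would be to store the credentials as secret variables in the project settings and log in from the job itself:\n\n```yaml\nbefore_script:\n  # REGISTRY_USER / REGISTRY_PASSWORD would be GitLab secret variables (illustrative names)\n  - docker login -u \"$REGISTRY_USER\" -p \"$REGISTRY_PASSWORD\" registry.loc\n```\n\n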
## About the Author\n\n[Stephan Hochdörfer](https://twitter.com/shochdoerfer) currently holds the position of Head of Technology at [bitExpert AG](https://www.bitexpert.de), a company specializing in software and mobile development. His primary focus is everything related to web development as well as automation techniques ranging from code generation to deployment automation.\n\n_This post was originally published on [blog.bitexpert.de](https://blog.bitexpert.de/blog/dockerizing-gitlab-review-apps/)._\n\n[Cover image](https://unsplash.com/@guibolduc?photo=uBe2mknURG4) by [Guillaume Bolduc](https://unsplash.com/@guibolduc) on Unsplash\n{: .note}\n",[4440,110],{"slug":8181,"featured":6,"template":678},"dockerizing-review-apps","content:en-us:blog:dockerizing-review-apps.yml","Dockerizing Review Apps","en-us/blog/dockerizing-review-apps.yml","en-us/blog/dockerizing-review-apps",{"_path":8187,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8188,"content":8194,"config":8199,"_id":8201,"_type":16,"title":8202,"_source":17,"_file":8203,"_stem":8204,"_extension":20},"/en-us/blog/discovering-gitlabs-personas",{"title":8189,"description":8190,"ogTitle":8189,"ogDescription":8190,"noIndex":6,"ogImage":8191,"ogUrl":8192,"ogSiteName":692,"ogType":693,"canonicalUrls":8192,"schema":8193},"Discovering GitLab’s personas","Our User Experience (UX) Researcher updates us on the progress of GitLab’s personas","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679763/Blog/Hero%20Images/discovering_gitlabs_personas.jpg","https://about.gitlab.com/blog/discovering-gitlabs-personas","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Discovering GitLab’s personas\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah O’Donnell\"}],\n        \"datePublished\": \"2017-06-08\",\n      }",{"title":8189,"description":8190,"authors":8195,"heroImage":8191,"date":8196,"body":8197,"category":14,"tags":8198},[6197],"2017-06-08","\n\nBack in January, I explained [why GitLab uses personas in product development](/blog/the-importance-of-ux-personas/). At the time, we were still in the process of discovering who GitLab’s personas were. To make sure that the needs and expectations of our users were met, we asked them to complete a survey to share their views with us. Since then, the results have been analyzed, resulting in the first iteration of our personas. In this post, I’d like to share more about the survey which contributed to GitLab’s personas.\n\n## Survey design\n\nThe survey contained a mixture of open-ended and closed-ended questions.\n\nWe chose to use open-ended questions as this was the first survey we had produced which aimed to explore users’ motivations and experiences of using GitLab. We wanted to give participants the freedom to answer questions in their own words and avoid leading them towards answers that they wouldn’t have necessarily selected with closed-ended questions.\n\nStudies have shown that people tend to focus more on earlier (primacy effect) or later (recency effect) options, with less time spent evaluating middle options. This suggests that the order in which we present questions to our users may affect the way they respond to them. 
For example, for the question of ‘Why do you contribute to open source tools?’, there were 10 possible answers users could select from, ranging from ‘To give back to the community’ to ‘To resolve issues I experience with the tool’. To ensure each answer received an equal opportunity to be selected, the ordering of the answers was shuffled between each user. This way, no option remained in the middle of the list, and the risk of an option being overlooked due to its position was reduced. Where possible, other closed-ended questions received the same treatment, reducing bias and ensuring a fair distribution of responses.\n\nIn terms of choosing what questions to ask, we asked multiple people working in different teams across GitLab how they would describe GitLab users. We wanted to be able to test their assumptions, along with our own. Using these assumptions, we formed research questions. Research questions are the goals and objectives of your study, rather than the questions which appear in your survey. They help you to clearly define what it is you want to find out from your survey before you even begin writing it. Once we had our research questions, we wrote the survey to directly address them.\n\nTo ensure that we could extract the information we required from the survey questions, we wanted to make sure that every respondent would interpret the questions the way we had intended. We asked colleagues to complete the survey to see if their answers differed from the true intent of the questions. Any ambiguous wording was amended. The survey was then incrementally shared externally with users. This allowed us to further monitor answers, while also checking the survey for bugs (for example, are users able to submit their answers?).\n\n## Responses\n\nWe were primarily interested in hearing from engaged GitLab users, so the survey was advertised on GitLab’s blog, social media accounts, and via the UX webcast. The survey received just over 500 responses over a 50-day period.\n\n## Analysis\n\nSurveys are by no means perfect: they only capture the views of people who feel comfortable sharing information in this way. In brief, the users who chose to respond to the survey could be very different from those who chose not to respond, thus creating selection bias.\n\nMore than 100,000 organizations and millions of users are using GitLab, so a sample size of just over 500 people may seem relatively small. In order to identify users who could be underrepresented, it was important to explore who the respondents of the survey were. By comparing respondents with nonrespondents, it was easy to identify where the weaknesses were in the data collected and to determine what needed further research. Equally, it highlighted the strengths of the data and what could be reported on with near certainty.\n\nSome of the attributes we compared between respondents and nonrespondents included:\n\n - Length of time using GitLab\n - GitLab edition (Community vs Enterprise)\n - Size of organization (for users who used GitLab at work)\n - Job role\n\nWe also examined demographic and background information, such as age, location, and programming experience/qualifications.\n\n## Results\n\nWe added the newly formed personas to GitLab's [handbook](/handbook/product/personas/#user-personas).\n\nDon’t feel you’re accurately represented? Don’t worry! 
The personas are very much a work in progress and we will continue to add to them based on further insights revealed from user interviews, usability testing and future surveys.\n\nWant to share your experiences of GitLab with me? Join [GitLab First Look](/community/gitlab-first-look/) and help us build an even better picture of who GitLab’s users really are!\n{: .alert .alert-gitlab-orange}\n",[1144],{"slug":8200,"featured":6,"template":678},"discovering-gitlabs-personas","content:en-us:blog:discovering-gitlabs-personas.yml","Discovering Gitlabs Personas","en-us/blog/discovering-gitlabs-personas.yml","en-us/blog/discovering-gitlabs-personas",{"_path":8206,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8207,"content":8213,"config":8219,"_id":8221,"_type":16,"title":8222,"_source":17,"_file":8223,"_stem":8224,"_extension":20},"/en-us/blog/continuous-integration-ticketmaster",{"title":8208,"description":8209,"ogTitle":8208,"ogDescription":8209,"noIndex":6,"ogImage":8210,"ogUrl":8211,"ogSiteName":692,"ogType":693,"canonicalUrls":8211,"schema":8212},"How GitLab CI supported Ticketmaster's ramp up to weekly mobile releases","Ticketmaster Android developer Jeff Kelsey shares why GitLab CI was a game changer for his team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682946/Blog/Hero%20Images/tm-cover-image-small.jpg","https://about.gitlab.com/blog/continuous-integration-ticketmaster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab CI supported Ticketmaster's ramp up to weekly mobile releases\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeff Kelsey\"}],\n        \"datePublished\": \"2017-06-07\",\n      }",{"title":8208,"description":8209,"authors":8214,"heroImage":8210,"date":8216,"body":8217,"category":14,"tags":8218},[8215],"Jeff Kelsey","2017-06-07","\nIt's always been a goal for the Ticketmaster mobile team to get to weekly releases. In the first half of this year we were able to accomplish it, delivering new versions\nof both the Android and iOS app on a weekly basis since February. We've seen the positive impact on our fans, and it was even easier than we thought –\nmaking our entire application development process that much better.\n\nBut it didn't start out this way...\n\n\u003C!-- more -->\n\n![review-2](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Review2.png \"Most user-friendly ticketing app\")*\u003Csmall>A faster, more consistent release cycle leads to a better fan experience for users of the Ticketmaster Apps.\u003C/small>\n\nThere comes a time in every engineer’s career when a part of your tech stack no longer passes the “smell test.\" Usually, there is some sort of dramatic event where something that was generally accepted as “isn’t the best, but it works” changes to “this is now a problem.” For me and the Ticketmaster mobile team, this event happened with our Jenkins-based CI pipeline in February.\n\nWe were about to release the newest version of our Android app, but there was a mistake in the build. We had forgotten to increment the Android versionCode, meaning we would need to update and create a new binary file to upload to the store. It was the end of the day, a sunny afternoon quickly fading to darkness in Hollywood. By now it was 6pm PST, and everyone was eager to leave.\n\n\"No problem,\" I thought. 
I can build the release locally in under three minutes, provide the file to the QA team, and we can all get on our way.\n\n“Won’t help us,” responded my high-standard and exceptional QA team.\n\n“All releases need to come from CI for consistency.” They were right. Local builds would not be safe for production. What if something about my machine’s configuration introduced an issue?\n\n“Ok, so how long does it take for the release build to get created through our Jenkins CI pipeline?” I asked, figuring the time couldn’t be worse than 30 minutes.\n\n“It takes two hours,” came the response. Sigh… Going to be a late night.\n\n![sysiphus](https://about.gitlab.com/images/blogimages/ticketmaster-assets/sysiphus.gif \"Sisyphus\")\n\n*\u003Csmall>Our old CI pipeline\u003C/small>*\n\n## GitLab CI to save the day (in a day!)\n\nTwo… hours… For a minor change. Now I can’t lay all the blame on Jenkins. Some of this may have been our own fault, generating too many build flavors, forcing clean rebuilds in between steps and running extra tests for deprecated features. But it was clear we needed to change and get better at CI. Jenkins had been a bit clunky for the last few years. Weighed down by plugins and years of legacy development, the Jenkins machines were also difficult to update with new SDKs, and we had to rely on other teams to assist us. We clearly needed a fresh start.\n\nWe had been using GitLab at Ticketmaster for several years for code review and visually browsing our git history, so it made sense to give [GitLab’s new CI tools](/solutions/continuous-integration/) a shot. I started with a helpful Android [blog post on setting up GitLab CI by Greyson Parrelli](http://www.greysonparrelli.com/post/setting-up-android-builds-in-gitlab-ci-using-shared-runners/).\n\nBut I soon ran into a problem. At Ticketmaster we use Amazon ECR for our [Docker](https://aws.amazon.com/docker/) container registry rather than GitLab repos as in the tutorial. With the help of Kraig Amador, Tim Nichols, and others at Ticketmaster, I learned how to push my Docker container image to Amazon ECR and pull it down for each Android build in GitLab CI. The final results were a marked improvement:\n\n![gitlab-ci](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Gitlab8min.png 'GitLab CI in 8 minutes')*\u003Csmall>Our GitLab CI pipeline takes under 8 minutes to build, test, and publish artifacts.\u003C/small>*\n\nLess than eight minutes total from commit to build, test and generate artifacts. We can use Gradle and the SonarQube plugin to help us calculate code quality with every commit to our codebase, giving us more valuable information in addition to passing/failing tests to evaluate all of our merge requests. This gives our team numbers to measure and make goals against.\n\nAnd we could see everything in one place, in GitLab. The iOS team had a more complicated pipeline, but they quickly followed with their own, running their tests on local runners. Since February we have had weekly releases of our mobile apps, and GitLab CI has been a huge part of our success.\n\n
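For reference, the ECR arrangement described above boils down to something like this in `.gitlab-ci.yml` (a sketch; the region, registry URL, and image name are placeholders):\n\n```yaml\nbefore_script:\n  # aws ecr get-login prints a docker login command; evaluating it authenticates the runner\n  - $(aws ecr get-login --region us-west-2)\n  - docker pull 123456789012.dkr.ecr.us-west-2.amazonaws.com/android-build:latest\n```\n\n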
## From GitLab artifact to weekly releases\n\n![weekly-release](https://about.gitlab.com/images/blogimages/ticketmaster-assets/WeeklyReleases.png \"Weekly Releases\")*\u003Csmall>GitLab CI has helped us get to weekly releases with more consistent adoption of new releases.\u003C/small>*\n\nBeyond faster cycle times and faster releases, we have seen other benefits. Since each release has a smaller change set, our crash-free rates and store ratings have improved. We spend less time waiting for builds and more time improving the quality of our products. Our fans are getting features into their hands more quickly and benefit from a higher-quality, consistently improving product. The CI analytics available on GitLab are an additional scoreboard for our team to optimize and improve into the future.\n\nNow, whenever we integrate new SDKs into our mobile apps, we are helping other teams get their SDKs set up in GitLab CI to push integrated builds to our suite of integration and functional tests as a part of our process. We are [getting to innovation faster](https://tech.ticketmaster.com/2016/11/08/getting-to-innovation-faster/).\n\nThings were looking pretty scrappy for our CI pipeline only a few months ago. Now it is a whole different ballgame. If your team is looking for a way to breathe fresh life into a legacy CI pipeline, I suggest taking a look at GitLab CI. It has been a real game changer for our mobile team at Ticketmaster.\n\n![review-1](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Review1.png \"Ticketmaster Mobile Review 1\")\n![review-3](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Review3.png \"Ticketmaster Mobile Review 2\")\n\n### About the Author\n\nJeff Kelsey is the Lead Engineer for Ticketmaster's Android development team. 
Find him on Twitter [@jeffkelsey](https://twitter.com/jeffkelsey).\n",[916,110,4440],{"slug":8220,"featured":6,"template":678},"continuous-integration-ticketmaster","content:en-us:blog:continuous-integration-ticketmaster.yml","Continuous Integration Ticketmaster","en-us/blog/continuous-integration-ticketmaster.yml","en-us/blog/continuous-integration-ticketmaster",{"_path":8226,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8227,"content":8233,"config":8238,"_id":8240,"_type":16,"title":8241,"_source":17,"_file":8242,"_stem":8243,"_extension":20},"/en-us/blog/fast-and-natural-continuous-integration-with-gitlab-ci",{"title":8228,"description":8229,"ogTitle":8228,"ogDescription":8229,"noIndex":6,"ogImage":8230,"ogUrl":8231,"ogSiteName":692,"ogType":693,"canonicalUrls":8231,"schema":8232},"Fast and natural continuous integration with GitLab CI","An overview of GitLab Continuous Integration and Delivery, and the main features of the tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684106/Blog/Hero%20Images/fast-and-natural-continuous-integration-with-gitlab-ci.jpg","https://about.gitlab.com/blog/fast-and-natural-continuous-integration-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Fast and natural continuous integration with GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Samuel Alfageme\"}],\n        \"datePublished\": \"2017-05-22\",\n      }",{"title":8228,"description":8229,"authors":8234,"heroImage":8230,"date":8236,"body":8237,"category":14},[8235],"Samuel Alfageme","2017-05-22","\nDo you use GitLab to store your repos? Have you ever stopped to check what some of those tabs on top of your repositories do? Well, you can either disable those in your project settings, or you can keep reading to discover some ways in which they can help you power up your development speed.\n\n\u003C!-- more -->\n\n![what do these tabs do?](https://about.gitlab.com/images/blogimages/fast-and-natural-continuous-integration-with-gitlab-ci/your-awesome-project.png){: .shadow}\u003Cbr>\n\nThis post aims to offer a high-level overview of how GitLab has interpreted the main concepts of Continuous Integration/Delivery and to introduce the main features of the tool as well as their naming conventions, which can sound more overwhelming than they actually are.\n\nThe product’s growth has made it a fixture in many companies’ tech stacks. Part of this popularity comes from being a genuinely great open-source project to follow: its release cycle is blazing fast, delivering new features on the 22nd of every month. One of the most important factors in this equation is that it’s not only a self-managed solution that lets you keep all your projects under control, but also a pretty solid one. Just ask your ops team how much even the Community (non-commercial) Edition brings to the table that used to be incredibly expensive, or simply missing, in many other SaaS products.\n\n![integrated development tools](https://about.gitlab.com/images/blogimages/fast-and-natural-continuous-integration-with-gitlab-ci/idea-to-production-graphics.png){: .shadow}\u003Cbr>\n\nIt sometimes happens that we simply stick to the tools we know and never notice or research alternatives that could improve our everyday work. I believe this to be the case with some of GitLab’s lesser-known features. The software is much more than a traditional VCS server. 
In fact, last year they came up with the idea of their so-called “masterplan” to extend the product to be more like a “suite” and cover every step of the development cycle, or in their words: go from [idea to production](/blog/continuous-integration-delivery-and-deployment-with-gitlab/#from-idea-to-production-with-gitlab). And in both previous and [future](/direction/) releases, they are bundling some really cool additional integrations worth checking out (e.g. Mattermost ChatOps or Prometheus monitoring).\n\n### Differences with other continuous integration tools\n\nLet’s start getting the full picture of the features that make it so powerful. First of all, its model is based on a lightweight YAML configuration file stored in each repository’s root. This has some pros worth mentioning:\n\n1. Tightly coupled systems (both Continuous Integration and VCS are a single product).\n2. The Continuous Integration configuration becomes versioned:\n   * Allowing different branches to have different configurations.\n   * Allowing contributors to also collaborate on the integration setup.\n3. Docker integration out of the box, including a private Docker registry per project.\n4. An artifacts browser that lets you access each stage’s output the same way you would locally.\n5. No CI server maintenance nightmares.\n\nOf course, from the user’s point of view, it also comes with some drawbacks compared to other Continuous Integration systems, since it embraces a “convention over configuration” model, which means you get a pretty powerful tool without spending your time configuring it, as long as you stick to its way of doing things. The lack of plug-ins and integrations we are used to seeing in other tools to fine-tune some aspects of the project is one of them (e.g. creating jobs that require multiple repositories becomes non-trivial). On the other hand, most use cases are covered well, and features like configurable email alerts, a history browser, and programmable builds are all there.\n\nBut let’s be clear: they have neither invented the wheel nor are they the only ones using it. Many other Continuous Integration solutions rely on similar paradigms (.travis.yml, Jenkinsfiles…). This is all a matter of taste. The strongest feeling you get after using different solutions is that GitLab’s is easier to configure, and it keeps most details under control in the same browser tab. They took some of the best features of every tool and bundled them into this solution. It makes complete sense.\n\n### Main features\n\nLet’s try to briefly illustrate the main features of this tool and demonstrate how easy it is to get started with it, even without any sysadmin experience and without bothering your DevOps team. As said, everything spins around the `.gitlab-ci.yml` file containing the definition of the different stages (steps) that all have to be completed in order to get the project successfully delivered. The [file structure](/direction/) is natural to read, and once you have seen a couple of examples, you’ll start writing your own without much effort.\n\nHead first for GitLab’s [CI docs](https://docs.gitlab.com/ee/ci/yaml/) for lots of info about how to translate the specific needs of your project into their conventions.\n\n
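To give a feel for the format, here is a minimal, illustrative configuration (the job names and commands are made up):\n\n```yaml\n# illustrative jobs; any build tool works here\nstages:\n  - test\n  - build\n\nrun_tests:\n  stage: test\n  script:\n    - make test\n\nbuild_package:\n  stage: build\n  script:\n    - make build\n  artifacts:\n    paths:\n      - dist/\n```\n\n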
But to summarize, here is a review of the Continuous Integration capabilities every project has enabled by default:\n\n#### CI/CD settings\n\nThe settings page shows just how easy it is to configure everything needed to create the continuous integration magic, i.e.:\n\n#### Runners\n\nWhat if I told you that you can finally forget about configuring and managing slave machines, the way they speak with the CI server through SSH, how to balance the workload between all the build machines, and all the other stuff that is both tedious and often difficult? Meet the runners! [Setup](https://docs.gitlab.com/runner/install/) is just as easy as following the three steps described in the CI/CD settings (settings/ci_cd) of your project: install the right binary for your OS, set the URL endpoint and the registration token provided in the settings page, and you are done. Also, you can tag the runners based on their capabilities (e.g. docker, databases, etc.) to select them for specific jobs when they are required.\n\nRunners can implement many executors, i.e. ways of running your build scripts/code: from the most basic SSH executor, through a container host, right up to the biggest Kubernetes cluster you can think of, even supporting PowerShell/Batch on Windows systems.\n\nOn their hosted solution, [gitlab.com](https://gitlab.com/), they also provide \"shared runners\": VMs free of charge, dynamically allocated to build your project, extremely useful for those open source projects with limited resources.\n\n#### Secrets management\n\nIn the microservices era, where your project might be integrated and talk with dozens of APIs that require tokens, secrets, passwords, and many other ways to authorize that dialog, a way to handle this complexity in an elegant manner becomes a priority. A really bad smell [seen in many projects](https://gitleaks.com/) is to store all these in config files on a remote machine or even leave them hanging around in some piece of code. To stress the importance of this, just consider how many [services](https://www.vaultproject.io/) and [projects](https://docs.docker.com/engine/swarm/secrets/) are popping up to handle this issue. GitLab projects provide a simple keystore in their Continuous Integration settings that can be accessed from the integration scripts, to help project members handle and configure all these secrets.\n\n#### Pipelines\n\nThis is the core feature of any Continuous Integration system and yet a really simple concept: it translates to all the steps you’ll follow from the moment you are facing your source code to the point at which you are about to deploy your application. In between, you can include everything needed to make sure your code looks right (linting), can be built, works right (testing), and integrates with other systems, plus anything else you can come up with to take the last steps and ship your code.\n\n![this is the core feature of any Continuous Integration system](https://about.gitlab.com/images/blogimages/fast-and-natural-continuous-integration-with-gitlab-ci/pipelines-are-a-core-feature.png){: .shadow}\u003Cbr>\n\n#### Container registry\n\nImagine having your own private Docker Hub, where you can store your project’s images and update them whenever needed without having to expose them to the public, while being able to log in to the registry and pull them from anywhere. You can have an image ready for every stage of the road, and pulling them from your runners is blazing fast. This becomes super handy for avoiding environment initialization, thereby speeding up the total time the pipeline takes to run: faster builds = happier devs.\n\n
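Jobs can then reference those images directly (a sketch; the registry path is illustrative):\n\n```yaml\nbuild:\n  # illustrative registry path for a project-specific build image\n  image: registry.gitlab.com/mygroup/myproject/build-env:latest\n  script:\n    - make build\n```\n\n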
#### Environments and Review Apps\n\nEveryone loves [gitflow](http://nvie.com/posts/a-successful-git-branching-model/), right? There are some good reasons for that. It’s built on the premise that if every branch is developed in isolation, new features don’t interfere with each other or the stable version until they are merged back into the master branch. This helps with both developing new functionalities and testing them.\n\nWhen containers came into our lives, it was obvious how the process of deploying different, independent environments with individual settings per feature could be eased and improved. It’s fair to say we now use containers as the de facto standard for environment templates. Most mainstream Continuous Integration solutions were released way before the first container was even created, so they were not built with Docker in mind, but for GitLab CI it’s the other way around: they embrace containers as the way to go, based on the many benefits they bring to the workflow.\n\n![GitLab embraces containers as the way to go, based on the many benefits it brings to the workflow.](https://about.gitlab.com/images/blogimages/fast-and-natural-continuous-integration-with-gitlab-ci/environments-and-review-apps.png){: .shadow}\u003Cbr>\n\nImagine a very common scenario in the workflow: you are about to merge a feature branch that has to be tested out by the QA team, and which introduces some new libraries and a new service (e.g. Redis, MongoDB…) to be deployed. Just update your Dockerfile to include the new layer(s) that manage those new dependencies and push the image to your local registry. Also, append a line to `.gitlab-ci.yml` to include the new service from Docker Hub. The feature branch, when pushed to the remote, will have everything in place to be deployed to the testing environment.\n\n![The feature branch, when pushed on the remote, will have everything in place to be deployed to the testing environment](https://about.gitlab.com/images/blogimages/fast-and-natural-continuous-integration-with-gitlab-ci/production-and-staging.png){: .shadow}\u003Cbr>\n\nThis is where [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) come into action: it’s just a posh name for dynamic, per-branch environments created to verify changes and see them live. Every branch gets instant deploy support when pushed to the repo. This process can be sped up with tools such as [dpl](https://github.com/travis-ci/dpl), which abstract many of the details of the major deploy services, or you can go freestyle and call your custom deployment script from there. GitLab also integrates an interactive in-browser terminal to introspect builds, to debug and troubleshoot if needed. You no longer have to go through the Jenkins mantra of cloning a configured job, renaming it, adjusting the branch in the configuration… and 10 or 15 more tedious steps before deploying an ephemeral test environment.\n\n
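In `.gitlab-ci.yml`, a per-branch environment boils down to a few lines (a sketch; the deploy script and URL scheme are illustrative):\n\n```yaml\ndeploy_review:\n  stage: deploy\n  script:\n    # deploy-review.sh and the URL scheme below are placeholders\n    - ./deploy-review.sh\n  environment:\n    name: review/$CI_COMMIT_REF_NAME\n    url: http://$CI_COMMIT_REF_NAME.review.example.com\n  except:\n    - master\n```\n\n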
Another major feature you are going to love is the [history browser](https://docs.gitlab.com/ee/ci/environments/index.html#viewing-the-deployment-history-of-an-environment). Imagine you deployed some changes to a demo environment and, 10 minutes before going live, you detect it contains a major bug that would be nasty if displayed on camera. No worries; it’s super easy to access the history of what was deployed on every environment and perform rollbacks or redeploy on demand to any previous reproducible state. Just awesome.\n\n![it’s super easy to access the history of what was deployed on every environment](https://about.gitlab.com/images/blogimages/fast-and-natural-continuous-integration-with-gitlab-ci/history-browser.png){: .shadow}\u003Cbr>\n\nI hope that after this post, the central concepts of GitLab’s Continuous Integration make good sense for you and your projects.\n\n_This post was originally published on [solidgeargroup.com](https://solidgeargroup.com/gitlab_countinuous_integration_intro)._\n\n\u003Cp class=\"alert alert-orange\" style=\"background-color: rgba(252,163,38,.3); border-color: rgba(252,163,38,.3); color: rgb(226,67,41) !important; text-align: center;\">Sign up for a &nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i> &nbsp;&nbsp;\u003Cstrong>GitLab EE Trial\u003C/strong> &nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i> &nbsp;&nbsp;\u003Ca style=\"color: rgb(107,79,187);\" href=\"/free-trial/\">now\u003C/a>!\u003C/p>\n",{"slug":8239,"featured":6,"template":678},"fast-and-natural-continuous-integration-with-gitlab-ci","content:en-us:blog:fast-and-natural-continuous-integration-with-gitlab-ci.yml","Fast And Natural Continuous Integration With Gitlab Ci","en-us/blog/fast-and-natural-continuous-integration-with-gitlab-ci.yml","en-us/blog/fast-and-natural-continuous-integration-with-gitlab-ci",{"_path":8245,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8246,"content":8252,"config":8257,"_id":8259,"_type":16,"title":8260,"_source":17,"_file":8261,"_stem":8262,"_extension":20},"/en-us/blog/devops-containers-gitlab-openshift",{"title":8247,"description":8248,"ogTitle":8247,"ogDescription":8248,"noIndex":6,"ogImage":8249,"ogUrl":8250,"ogSiteName":692,"ogType":693,"canonicalUrls":8250,"schema":8251},"Demo - Auto Deploy from GitLab to an OpenShift container cluster","See how to reliably and repeatably build, test, and deploy an application in a container from GitLab to the OpenShift container cluster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671657/Blog/Hero%20Images/devops-openshift-webcast-blog-cover.png","https://about.gitlab.com/blog/devops-containers-gitlab-openshift","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Demo - Auto Deploy from GitLab to an OpenShift container cluster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2017-05-16\",\n      }",{"title":8247,"description":8248,"authors":8253,"heroImage":8249,"date":8255,"body":8256,"category":14},[8254],"Erica Lindberg","2017-05-16","\n\nContainers are an essential tool for achieving [DevOps](/stages-devops-lifecycle/) at scale. Bringing code and infrastructure closer together, containers provide consistency across environments and tools for developers, QA, and IT. 
Using GitLab's [built-in CI/CD](/solutions/continuous-integration/) and our integration with OpenShift, you can run all of your CI/CD jobs in a container cluster.\n\n\u003C!-- more -->\n\n## What is a container?\n\nContainers work much like a virtual machine except that, instead of packaging your code with an operating system, containers run as Linux processes sharing the host's kernel. This means that each container only contains the code and dependencies needed to run that specific application, making them smaller and faster to run.\n\nFor developers, containers make it possible to build one version of an application that can be easily deployed to multiple types of environments. Essentially, whatever developers and QA run is exactly what finds its way to production. What you see in development is what you see in testing, staging, and production. Code can be shipped faster when packaged in a container because errors and bugs are caught earlier in the process.\n\nCommunication and collaboration between developers and operations also improve because developers see exactly what operations is getting, and operations is running exactly what the developers gave them. The result is that everyone can focus more on shipping quality code faster because applications don't have to be rebuilt as they move through the development lifecycle.\n\nGitLab integrates with both the Kubernetes and OpenShift container orchestration platforms, making it possible to run [continuous integration and continuous delivery](/topics/ci-cd/) related jobs in the cluster. It also gives you the ability to set up different environments, called [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/), for different branches of your code base. Review Apps make it easy to view and test changes in an environment, in the cluster, so you can iterate and test faster.\n\n\n### Demo\n\nIn this demonstration, Senior Build Engineer DJ Mountney will show how you can build, test, and deploy a basic application in a container from GitLab to an OpenShift container cluster.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/EwbhA53Jpp4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### Watch the webcast\n\nTo learn more about containers, how they can help scale your [DevOps workflow](/topics/devops/), and the GitLab/OpenShift integration, [watch](https://www.youtube.com/watch?v=uofcDMclUnk&feature=youtu.be) **The DevOps Journey: Using Containers webcast**.\n",{"slug":8258,"featured":6,"template":678},"devops-containers-gitlab-openshift","content:en-us:blog:devops-containers-gitlab-openshift.yml","Devops Containers Gitlab Openshift","en-us/blog/devops-containers-gitlab-openshift.yml","en-us/blog/devops-containers-gitlab-openshift",{"_path":8264,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8265,"content":8271,"config":8276,"_id":8278,"_type":16,"title":8279,"_source":17,"_file":8280,"_stem":8281,"_extension":20},"/en-us/blog/how-to-use-macstadium-and-gitlab-ci-to-build-your-macos-or-ios-projects",{"title":8266,"description":8267,"ogTitle":8266,"ogDescription":8267,"noIndex":6,"ogImage":8268,"ogUrl":8269,"ogSiteName":692,"ogType":693,"canonicalUrls":8269,"schema":8270},"How to use GitLab CI and MacStadium to build your macOS or iOS projects","Learn how to use GitLab CI on MacStadium's mini servers to build your macOS or iOS 
projects.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671709/Blog/Hero%20Images/macstadium-datacenter.jpg","https://about.gitlab.com/blog/how-to-use-macstadium-and-gitlab-ci-to-build-your-macos-or-ios-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab CI and MacStadium to build your macOS or iOS projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Achilleas Pipinellis\"}],\n        \"datePublished\": \"2017-05-15\",\n      }",{"title":8266,"description":8267,"authors":8272,"heroImage":8268,"date":8274,"body":8275,"category":14},[8273],"Achilleas Pipinellis","2017-05-15","\n\nIn this article, we will see how to get started with\n[MacStadium](https://www.macstadium.com \"MacStadium website\"),\na provider that offers dedicated Mac hardware which you can use with\n[GitLab CI](/solutions/continuous-integration/ \"GitLab CI/CD feature page\")\nand build your macOS or iOS application.\n\n\u003C!-- more -->\n\n[Continuous Integration and Delivery with GitLab](/blog/continuous-integration-delivery-and-deployment-with-gitlab/ \"Blog on CI/CD with GitLab\")\nis easier if you are developing your application on Linux. All you need to do is\nfind a provider, spin up a VM, install [GitLab Runner](https://docs.gitlab.com/runner \"GitLab Runner docs\")\non it and configure your project's [`.gitlab-ci.yml` file](https://docs.gitlab.com/ee/ci/yaml/ \".gitlab-ci.yml reference guide\").\nOn macOS on the other hand, things may not be that easy. Finding a provider that\noffers a Mac machine that will be online 24/7 can be hard. MacStadium is here to\nhelp achieve that goal and offers a discount to all GitLab users. Let's see how\nto make this happen.\n\n---\n\n### On this page\n{:.no_toc}\n\n- TOC\n{:toc}\n\n---\n\n## Choosing a Mac mini plan\n\nThe first step is to choose a plan. MacStadium offers many options, so you can\npick whatever fits your needs. A [Mac mini](https://www.macstadium.com/mac-mini/#products)\nis perfect to test things out and is a good first choice. It should be enough\nto get you started, but depending on your needs you may have to upgrade to a\nmore beefy machine.\n\nDepending on your location and project needs, you have to:\n\n1. Choose the datacenter\n1. Choose the Mac mini\n1. Choose the macOS version\n1. Click **Start Trial** (24 hours) on the Mac mini of your choice to get going\n   in a few steps\n\nAt first you'll be asked to [create a MacStadium account](http://help.macstadium.com/accounts-and-billing/how-to-create-an-account.html \"How to create an account on MacStadium\")\nFill in your information and when in the **Secure Checkout** page, be sure to\ncheck the _I want to use a coupon code_ option and use `GITLAB10` for 10% off.\n\n\u003Ci class=\"fas fa-info-circle\" aria-hidden=\"true\" style=\"color: rgb(49, 112, 143);\">\u003C/i>\nAt the time of this writing, the staged machines come with macOS 10.12.2. As\nnew releases com from Apple, MacStadium tests them thoroughly and then update\ntheir staged machines. Of course, as a customer you can run the OS update on\nyour own machine anytime.\n{: .alert .alert-info}\n\n## Connecting to the Mac mini\n\nWhen you sign up, you'll receive an email with two important pieces of\ninformation:\n\n1. 
**The static IP address of your Mac.**\n   This will allow you to find your Mac mini from anywhere in the world, and\n   you can also point domain names to this IP address so it's easier to remember.\n\n1. **The username and password to access the machine.**\n   The user is standard for all installs, but the password is randomly\n   generated. Upon the first login, you can and are encouraged to change it.\n\nAfter you have this information, there are two ways of connecting to your Mac mini:\n[VNC](https://en.wikipedia.org/wiki/Virtual_Network_Computing \"VNC article on Wikipedia\")\nand [SSH](https://en.wikipedia.org/wiki/Secure_Shell \"SSH article on Wikipedia\").\n\n### Connecting with VNC\n\nIf connecting from a Mac, [Apple's Screen Sharing](https://support.apple.com/kb/PH25554 \"macOS Sierra: Set up and use screen sharing\")\nis ideal. It's installed on every Mac, and you can connect to your MacStadium\nMac mini easily with the username and password that were sent to you via email.\n\nIf you don't have a favorite VNC client already, there are a number of options\nto choose from:\n\n- [TigerVNC](http://tigervnc.org/ \"TigerVNC website\") (Multi-platform)\n- [Vinagre](https://wiki.gnome.org/Apps/Vinagre \"Vinagre website\") (GNU/Linux)\n- [Remmina](https://remmina.org/ \"Remmina website\") (GNU/Linux)\n- [RealVNC](https://www.realvnc.com/ \"RealVNC website\") (Multi-platform)\n\nWhen you connect with VNC, you can use the same password that is set up for the\nuser on the Mac mini.\n\n\u003Ci class=\"fas fa-info-circle\" aria-hidden=\"true\" style=\"color: rgb(49, 112, 143);\">\u003C/i>\nmacOS keeps screen sharing as a separate setting, but MacStadium's provision scripts\nenable VNC connections by default.\n{: .alert .alert-info}\n\n### Connecting with SSH\n\nYou can SSH into your machine using the username and password. For example, if\nthe username is `administrator` and the IP address of your machine is `1.2.3.4`:\n\n```bash\nssh administrator@1.2.3.4\n```\n\nEnter the password when asked (you won't be able to see it), hit Enter, and\nyou're in!\n\n## Setting up the development environment\n\nThe following steps are to be performed on the remote Mac machine. We need to\ninstall [Xcode](https://developer.apple.com/xcode/ \"Xcode on Apple's website\")\nand the command line tools that contain the SDKs and UNIX development applications\nlike the LLVM compiler, etc.\n\n\u003Ci class=\"fas fa-info-circle\" aria-hidden=\"true\" style=\"color: rgb(49, 112, 143);\">\u003C/i>\nApart from the needed toolset, the screensaver needs to be disabled; otherwise,\nthe machine will be put to sleep, disrupting the workflow of CI.\nThankfully, all the machines hosted on MacStadium are set to never sleep by\ndefault. This is set in **System Preferences > Energy Saver**.\n{: .alert .alert-info}\n\n### Installing Xcode\n\nThere are two ways to install Xcode. You can either download it from Apple's\ndeveloper portal or use the App Store, where you might be asked to fill in your\ncredit card information. To avoid that, manually download Xcode:\n\n1. Connect via VNC to the remote Mac machine\n1. Open a browser and go to \u003Chttps://developer.apple.com/download/more>\n1. Log in with your existing Apple ID or create one\n1. 
Select the Xcode version you wish to install\n\n   ![Download latest stable Xcode](https://about.gitlab.com/images/blogimages/how-to-use-macstadium-and-gitlab-ci-to-build-your-macos-or-ios-projects/download-xcode-without-cc-info.png){: .shadow}\n\n   This should download the Xcode package to your Downloads folder and\n   decompress it automatically. After a couple of minutes, it should finish.\n\n1. Open a terminal and move `Xcode.app` to `/Applications` so that it can be\n   found by Launchpad:\n\n   ```\n   mv ~/Downloads/Xcode.app /Applications\n   ```\n\n1. Go to your Applications and double-click on Xcode to install it.\n   The verification will begin; it should take a minute.\n1. Answer \"Open\" when you are asked if you are sure you want to open it.\n1. Select \"Agree\" in the License Agreement and provide your administrator\n   password.\n1. The installation process should begin, and after a while you will have Xcode\n   installed and ready to use.\n\nIn the next step, we will install the command line developer tools.\n\n### Installing the command line tools\n\nThe command line tools are a subset of Xcode, so if you installed Xcode,\nyou may skip this part.\n\n1. Open the Terminal app or run this command via SSH:\n\n    ```bash\n    xcode-select --install\n    ```\n\n1. Accept the License Agreement and the installation will begin.\n\n---\n\nNow that all development tools are installed, it's time to install GitLab\nRunner.\n\n## Installing and configuring the GitLab Runner\n\nGitLab Runner is responsible for running your jobs on macOS and then reporting\nthe results back to GitLab.\n\nTo install it, [follow the instructions in the Runner's documentation](https://docs.gitlab.com/runner/install/osx.html#installation \"Documentation on installing GitLab Runner on macOS\").\nMake sure to also carefully read the [current limitations](https://docs.gitlab.com/runner/install/osx.html#limitations-on-macos \"Limitations of macOS Runner\").\n\n
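Once installed, registration boils down to a single command (a sketch: the URL, token, and description are placeholders, and depending on your version the binary may be named `gitlab-ci-multi-runner` instead):\n\n```bash\n# placeholders: URL, registration token, and description\ngitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"YOUR_TOKEN\" \\\n  --executor \"shell\" \\\n  --description \"macstadium-mac-mini\"\n```\n\n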
## Testing a project with CI\n\nThe final piece of this puzzle is to set up a project in GitLab (your own\n[CE/EE instance](/stages-devops-lifecycle/) or even [GitLab.com](/pricing/)) and hook it up\nwith your Mac mini. We will not expand on that, but you can follow this nice blog post\nby Angelo Stavrow on [Setting up GitLab CI for iOS projects](/blog/setting-up-gitlab-ci-for-ios-projects/ \"Blog on setting up CI for iOS projects\").\nIt includes comprehensive steps to get you started.\n\n## Conclusion\n\nHaving a dedicated Mac machine for your development can save you precious time\nwhen you follow the Continuous Integration workflow with your team. In this\ntutorial, you've seen how to quickly get a remote Mac mini up and running\nfor your needs. Be sure to add the promo code `GITLAB10` on checkout for 10% off!\n\nHappy building!\n\n---\n\n\u003Ci class=\"fas fa-info-circle\" aria-hidden=\"true\" style=\"color: rgb(60,118,61);\">\u003C/i>\nGet 10% off of all MacStadium plans by using \u003Cstrong>GITLAB10\u003C/strong> at checkout.\nThe coupon can only be applied when signing up for hardware, but if you are an\nexisting customer who is using GitLab and makes a change in hardware, you can\nalways apply it again or request the discount in a ticket.\n{: .alert .alert-success}\n\n----\n\nCover image [provided by MacStadium](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/3324#note_29314223).\n{: .note}\n",{"slug":8277,"featured":6,"template":678},"how-to-use-macstadium-and-gitlab-ci-to-build-your-macos-or-ios-projects","content:en-us:blog:how-to-use-macstadium-and-gitlab-ci-to-build-your-macos-or-ios-projects.yml","How To Use Macstadium And Gitlab Ci To Build Your Macos Or Ios Projects","en-us/blog/how-to-use-macstadium-and-gitlab-ci-to-build-your-macos-or-ios-projects.yml","en-us/blog/how-to-use-macstadium-and-gitlab-ci-to-build-your-macos-or-ios-projects",{"_path":8283,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8284,"content":8290,"config":8294,"_id":8296,"_type":16,"title":8297,"_source":17,"_file":8298,"_stem":8299,"_extension":20},"/en-us/blog/demo-service-desk",{"title":8285,"description":8286,"ogTitle":8285,"ogDescription":8286,"noIndex":6,"ogImage":8287,"ogUrl":8288,"ogSiteName":692,"ogType":693,"canonicalUrls":8288,"schema":8289},"Demo - GitLab Service Desk","In 9.1, we introduced our new Service Desk feature, allowing your customers to reach you inside GitLab simply by using a support email address.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684042/Blog/Hero%20Images/demo-service-desk.jpg","https://about.gitlab.com/blog/demo-service-desk","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Demo - GitLab Service Desk\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"}],\n        \"datePublished\": \"2017-05-09\",\n      }",{"title":8285,"description":8286,"authors":8291,"heroImage":8287,"date":8292,"body":8293,"category":14},[6668],"2017-05-09","\nAs you expand your software products, GitLab's new [Service Desk](/releases/2017/04/22/gitlab-9-1-released/#service-desk-eep) feature in [9.1](/releases/2017/04/22/gitlab-9-1-released/) enables your growing user base to send emails to your team via a dedicated address per project for any kind of feedback or support. \n\n\u003C!-- more -->\n\nAfter enabling Service Desk in your project settings, every email sent to the support email address shows up as a confidential issue in your project. Commenting on these issues generates a response to the original email sender, creating a brand new, integrated user feedback channel right inside GitLab. 
As Service Desk is built right into GitLab itself, the complexity and inefficiencies of multiple tools and external integrations are eliminated, significantly shortening the cycle time from feedback to software update.\n\n### Demo\n\nWatch this demonstration of a support workflow using Service Desk, and how you can use other features within GitLab — like creating a “service-desk-tickets” label so that your support team can quickly filter issues within a project.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/m6oHRIeT1AE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\nSign up for a free trial of [GitLab Enterprise Edition](/free-trial/) to see firsthand how it can help your team.\n{: .alert .alert-gitlab-orange}\n",{"slug":8295,"featured":6,"template":678},"demo-service-desk","content:en-us:blog:demo-service-desk.yml","Demo Service Desk","en-us/blog/demo-service-desk.yml","en-us/blog/demo-service-desk",{"_path":8301,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8302,"content":8308,"config":8312,"_id":8314,"_type":16,"title":8315,"_source":17,"_file":8316,"_stem":8317,"_extension":20},"/en-us/blog/mapping-work-to-do-versus-time-with-burndown-charts",{"title":8303,"description":8304,"ogTitle":8303,"ogDescription":8304,"noIndex":6,"ogImage":8305,"ogUrl":8306,"ogSiteName":692,"ogType":693,"canonicalUrls":8306,"schema":8307},"Demo - Mapping work versus time, with burndown charts","Our Frontend Lead Jacob Schatz explains how managers and ICs can benefit from our burndown chart feature, released in GitLab 9.1.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666633/Blog/Hero%20Images/gitlab-2016-in-review-cover.png","https://about.gitlab.com/blog/mapping-work-to-do-versus-time-with-burndown-charts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Demo - Mapping work versus time, with burndown charts\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2017-04-25\",\n      }",{"title":8303,"description":8304,"authors":8309,"heroImage":8305,"date":8310,"body":8311,"category":14},[6728],"2017-04-25","\nEvery software development team likely feels pressure to move faster, shipping more software in shorter time periods. With GitLab [9.1](/releases/2017/04/22/gitlab-9-1-released/), we've introduced burndown charts to further help you track and manage your work.\n\n\u003C!-- more -->\n\nBurndown charts for projects help teams visualize the number of issues that are incomplete as they progress through a milestone. You can see the number of issues left to do, along with their cumulative issue weight, \"burn down\" over the remaining time before your deadline. 
This helps teams foresee obstacles and make decisions sooner, for instance about resources or scope, if risks emerge later in the timeline.\n\n### Demo\n\nIn this brief overview, Frontend Lead Jacob Schatz explains how burndown charts help managers keep a high-level view of their team's work, even as they help engineers self-pace and stay focused.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/zJU2MuRChzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### Watch the webcast\n\nFor Jacob's entire presentation, [watch the **Managing the DevOps Culture Shift** webcast](https://www.youtube.com/watch?v=py8c6-3zyKM&feature=youtu.be) on demand!\n",{"slug":8313,"featured":6,"template":678},"mapping-work-to-do-versus-time-with-burndown-charts","content:en-us:blog:mapping-work-to-do-versus-time-with-burndown-charts.yml","Mapping Work To Do Versus Time With Burndown Charts","en-us/blog/mapping-work-to-do-versus-time-with-burndown-charts.yml","en-us/blog/mapping-work-to-do-versus-time-with-burndown-charts",{"_path":8319,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8320,"content":8326,"config":8330,"_id":8332,"_type":16,"title":8333,"_source":17,"_file":8334,"_stem":8335,"_extension":20},"/en-us/blog/cloud-native-demo",{"title":8321,"description":8322,"ogTitle":8321,"ogDescription":8322,"noIndex":6,"ogImage":8323,"ogUrl":8324,"ogSiteName":692,"ogType":693,"canonicalUrls":8324,"schema":8325},"Demo: cloud native development with GitLab","See how you can leverage the power of the cloud with GitLab 9.0.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671482/Blog/Hero%20Images/cloud-native-demo.png","https://about.gitlab.com/blog/cloud-native-demo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Demo: cloud native development with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-04-18\",\n      }",{"title":8321,"description":8322,"authors":8327,"heroImage":8323,"date":8328,"body":8329,"category":14},[4182],"2017-04-18","\n\n[Cloud native development](/topics/cloud-native/) means moving away from monolithic apps towards [microservices](/topics/microservices/), causing a spike in the number of projects and making consistent and efficient application lifecycle management more important than ever.\n\n\u003C!-- more -->\n\nCloud native applications embrace a new approach to building and running applications that takes full advantage of the cloud computing model and container schedulers. This is not to be confused with running traditional applications in the cloud: cloud native means that applications are purpose-built for the cloud, and consist of loosely coupled services. Applications are re-architected for running in the cloud – shifting the focus away from the machine to the service instead. 
Cloud native acknowledges that the cloud is about more than just who manages your servers – it is the next step in digital transformation.\n\n## Demo\n\nIn this video demonstration from our [Cloud Native webcast](https://www.youtube.com/watch?v=wtaOQY_ITvQ&feature=youtu.be), Head of Product Mark Pundsack shows how, in less than 10 minutes, you can install GitLab on Kubernetes, build a project, create review apps, store Docker images in a container registry, deploy to production on Kubernetes, and monitor with Prometheus.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jfIyQEwrocw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nWatch the whole [Cloud Native webcast](https://www.youtube.com/watch?v=wtaOQY_ITvQ&feature=youtu.be) to learn more about cloud-native development and GitLab's vision for it.\n",{"slug":8331,"featured":6,"template":678},"cloud-native-demo","content:en-us:blog:cloud-native-demo.yml","Cloud Native Demo","en-us/blog/cloud-native-demo.yml","en-us/blog/cloud-native-demo",{"_path":8337,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8338,"content":8343,"config":8347,"_id":8349,"_type":16,"title":8350,"_source":17,"_file":8351,"_stem":8352,"_extension":20},"/en-us/blog/ci-cd-demo",{"title":8339,"description":8340,"ogTitle":8339,"ogDescription":8340,"noIndex":6,"ogImage":1834,"ogUrl":8341,"ogSiteName":692,"ogType":693,"canonicalUrls":8341,"schema":8342},"Demo: CI/CD with GitLab in action","Watch our video to see how to get started using CI/CD with GitLab.","https://about.gitlab.com/blog/ci-cd-demo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Demo: CI/CD with GitLab in action\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-03-13\",\n      }",{"title":8339,"description":8340,"authors":8344,"heroImage":1834,"date":8345,"body":8346,"category":14},[4182],"2017-03-13","\n\nIf your developer team isn’t among the [majority of developers using Continuous Integration](/blog/ci-integral-to-everyday-work/) more than 75 percent of the time, what are you waiting for? In this video demonstration, Product Manager [Joshua Lambert](https://gitlab.com/joshlambert) shows just how easy it is to set up a project with [GitLab CI/CD](/topics/ci-cd/), so you can start seeing features in action from the moment your teams create them.\n\n\u003C!-- more -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/1iXFbchozdY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nThis demonstration is part of our webcast, “From Continuous Integration to Continuous Everything”. 
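If you'd like to try this yourself before watching, a minimal `.gitlab-ci.yml` committed to the root of your repository is enough to get a first pipeline running. The snippet below is only a sketch, with illustrative job names and placeholder `echo` commands rather than the exact configuration used in the demo:\n\n```yaml\n# A sketch: job names and commands are placeholders\nstages:\n  - build\n  - test\n\nbuild-job:\n  stage: build\n  script:\n    - echo \"Building the project...\"\n\ntest-job:\n  stage: test\n  script:\n    - echo \"Running the test suite...\"\n```\n\n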
To find out more about Continuous Integration, [Delivery and Deployment](/blog/continuous-integration-delivery-and-deployment-with-gitlab/), and how to introduce a continuous mentality throughout your entire organization, [register now](https://page.gitlab.com/20170301_continuouseverything.html) to watch the whole webcast on demand.\n\nCover image: “[DSC_0179.jpg](https://www.flickr.com/photos/150654414@N02/32770042176)” by [Hilary Halliwell](https://www.flickr.com/photos/150654414@N02/) is licensed under [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/)\n{: .note}\n",{"slug":8348,"featured":6,"template":678},"ci-cd-demo","content:en-us:blog:ci-cd-demo.yml","Ci Cd Demo","en-us/blog/ci-cd-demo.yml","en-us/blog/ci-cd-demo",{"_path":8354,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8355,"content":8360,"config":8365,"_id":8367,"_type":16,"title":8368,"_source":17,"_file":8369,"_stem":8370,"_extension":20},"/en-us/blog/why-we-are-not-leaving-the-cloud",{"title":8356,"description":8357,"ogTitle":8356,"ogDescription":8357,"noIndex":6,"ogImage":2478,"ogUrl":8358,"ogSiteName":692,"ogType":693,"canonicalUrls":8358,"schema":8359},"Why we are not leaving the cloud","What we learned from our community vetting our proposal to leave the cloud.","https://about.gitlab.com/blog/why-we-are-not-leaving-the-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we are not leaving the cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean Packham\"}],\n        \"datePublished\": \"2017-03-02\",\n      }",{"title":8356,"description":8357,"authors":8361,"heroImage":2478,"date":8363,"body":8364,"category":14},[8362],"Sean Packham","2017-03-02","\n\n\u003Cscript>\n  var disqus_identifier = '/blog/why-we-are-not-leaving-the-cloud/';\n\u003C/script>\n\nTowards the end of 2016 we said we were [leaving the cloud for bare metal](/blog/why-choose-bare-metal/) and shared our [hardware proposal](https://news.ycombinator.com/item?id=13153031). In December 2016, after receiving hundreds of comments and emails filled with advice and warnings, [Sid and the team decided](https://gitlab.com/gitlab-com/infrastructure/issues/727#note_20044060) to keep GitLab.com in the cloud. The rest of the post summarizes some of the great community support and feedback we received and ends with how we are committed to making GitLab.com fast and stable in the cloud. Our decision was based on  more than what is below but we wanted to give you a good summary of all the interesting things that were shared publicly.\n\n\u003C!-- more -->\n\n## Let's begin on the topic of cost\n\n> When I was at Koding we made a similar move from AWS to bare metal. The costs were amazing. Something like $20k a month for what in AWS would cost $200k. I have been saying for a very long time that once you hit a certain scale AWS no longer makes sense. *[Geraint - GitLab blog: Going bare metal](/blog/why-choose-bare-metal/#comment-2999631471)*\n\n> We had 140 servers hosted in New York City for 10 years or so, and hosting only was going up and up, and contracts didn't give us flexibility to add cabinets when we needed. We basically had to cancel the previous contract, make a new one, pay for the upgrade, pay for the cabinet setup, etc... At some point, when we had financial trouble paying $14K/month for hosting, we decided to move all our servers from NYC to Tallinn, Estonia, where we built our own a small scale datacenter. 
As a result, we were able to cut hosting fees x10. *[Dmitri - GitLab blog: Proposed server purchase](/blog/proposed-server-purchase-for-gitlab-com/#comment-3049071074)*\n\n\u003Cdiv style=\"font-size: 38px; line-height: 1.2; margin: 45px 0 55px; font-style: italic;\">\nIt's not just the cost of owning and renewing the hardware, it's everything else that comes with it – daenney\n\u003C/div>\n\n> It's not just the cost of owning and renewing the hardware, it's everything else that comes with it. Designing your network, performance tuning and debugging everything. Suddenly you have a capacity issue, now what b/c you're not likely to have a spare 100 servers racked and ready to go, or be able to spin them up in 2m? Autoscaling? *[daenney - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153296)*\n\n> Application Architecture is far more important than Cloud vs. Bare Metal. It is just easier and more cost effective to throw more bare metal hardware at the problem than it is cloud instances. For some this does make bare metal the better option. *[mohctp - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13162964)*\n\n> Moving to your own hardware will almost certainly improve performance, reduce incidental downtime, and cut costs substantially. Including hiring more engineers, you might expect total costs to be ~40-50% of what you would have spent on cloud-based services over the first 24 months. If your hardware lifecycle is 36-48 months, you will see large savings beyond 24 months. *[bobf - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153413)*\n\n> I think they are going to underestimate the cost to GitLab in the long run. When they need to pay for someone to be a 30 minute drive from their DC 24/7/365 after the first outage, when they realize how much spare hardware they are going to want around, etc. *[manacit - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13154057)*\n\n## What About Performance?\n\n> A cloud service providers' biggest responsibilities to its customers are security, durability, availability and performance -- in that order. You guys are vastly underestimating the complexity involved in getting first 3 right. *[mritun - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13155809)*\n\n> Very few teams at Google run on dedicated machines. Those that do are enormous, both in the scale of their infrastructure and in their team sizes. I'm not saying always go with a cloud provider, I'm reiterating that you'd better be certain you need to. *[boulos - Hacker News: Going bare metal](https://news.ycombinator.com/item?id=12941210)*\n\n\u003Cdiv style=\"font-size: 38px; line-height: 1.2; margin: 45px 0 55px; font-style: italic;\">\nA company rolling their own system doesn't have to share, and they can optimise specifically for their own requirements – taneq\n\u003C/div>\n\n> As a cloud provider, though, you're trying to provide shared resources to a group of clients. A company rolling their own system doesn't have to share, and they can optimise specifically for their own requirements. *[taneq - Hacker News: Going bare metal](https://news.ycombinator.com/item?id=12940925)*\n\n> My thinking is that elasticity and recovery from hardware failure, and migration and multi-data center high availability will become concerns. 
Moving from the cloud to bare metal gives you performance and simplicity, but doesn't give you as many ways of recovering from network interruptions, and hardware failures. *[wpostma - the GitLab blog: Going bare metal](/blog/why-choose-bare-metal/#comment-3001348957)*\n\n> It sounds like they didn't design for the cloud and are now experiencing the consequences. The cloud has different tradeoffs and performance characteristics from a datacenter. If you plan for that, it's great. Your software will be robust as a result. If you assume the characteristics of a data center, you're likely to run into problems. *[wandernotlost - Hacker News: Going bare metal](https://news.ycombinator.com/item?id=12940082)*\n\n\u003Cdiv style=\"font-size: 38px; line-height: 1.2; margin: 45px 0 55px; font-style: italic;\">\nIt makes sense to keep GitLab.com as an eat-your-own-dog-food-at-scale environment – jtwaleson\n\u003C/div>\n\n> It makes sense to keep GitLab.com as an eat-your-own-dog-food-at-scale environment.  If one of their customers that run on-premise has performance issues they can't just say: GitLab.com uses a totally different architecture so you're on your own. They need GitLab.com to be as close as possible to the standard product. *[twaleson on Hacker News: Going bare metal](https://news.ycombinator.com/item?id=12940462)*\n\n> They are moving from cloud to bare metal because of performance while using a bunch of software that are notoriously slow and wasteful. I would optimise the hell out of my stack before commit to a change like this. Building your own racks does not deliver business value and it is extremely error prone process (been there, done that). *[StreamBright - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153866)*\n\n## Advice on our storage proposals\n\n> __Don't f*ck with storage.__ 32 file servers for 96TB? Same question as with networking re:ceph. What are your failure domains? How much does it cost to maintain the FTEs who can run this thing? *[Spooky23 - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153860)* - *Spooky23 did warn us \"I'm a cranky old person now\".*\n\n> I think there might be a pretty big IOPS drop when you switch over to this hardware. You're looking at having approximately 60 7200 RPM drives in this CephFS cluster. Doing the math, if you assume each of those drives can do 100 read and 100 write IOPS, and that you are doing 3x replication on write (plus journal writes), you're not going to get anywhere near the numbers that you want. *[Nicholas - the GitLab blog: Proposed server purchase](/blog/proposed-server-purchase-for-gitlab-com/#comment-3047537669)*\n\n>I would think that GitLab's workload is mostly random, which would pose a problem for larger drives. The SSDs are a great idea, but I've only seen 8TB drives used when there are 2 to 3 tiers; with 8TB drives being all the way on the bottom. I'm not sure how effective having a single SSD as a cache drive for 24TBs of 8TB disks will be. *[lykron - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153333)*\n\n## and our choice of 8TB drives\n\n> If you are looking for performance, do not get the 8TB drives. In my experience, drives above 5TB do not have good response times. I don't have hard numbers, but I built a 10 disk RAID6 array with 5TB disks and 2TB disks and the 2TB disks were a lot more responsive. 
*[lykron - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153196)*\n\n> Just a few quick notes. I've experience running ~300TB of usable Ceph storage. Stay away from the 8TB drives. Why are you using fat twins? Honestly, what does that buy you? You need more spindles, and fewer cores and memory. With your current configuration, what are you getting per rack unit? *[halbritt - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153786)*\n\n##  Feedback on our network proposals\n\n>__Don't f*ck with networking.__ Do you have experience operating same or similar workloads on your super micro SDN? Will the CEO of your super micro VAR pickup his phone at 2AM when you call? *[Spooky23 - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153860)*\n\n> I would not use 10GBase-T since it's designed for desktop use. I suggest ideally 25G SFP28 (AOC-MH25G-m2S2TM) but 10G SFP+ (AOC-MTG-i4S) is OK. The speed and type of the switch needs to match the NIC (you linked to an SFP+ switch that isn't compatible with your proposed 10GBase-T NICs). *[wmf - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153678)*\n\n> I didn't see it mentioned but what are your plans for the network strategy. Are you planning to run dual-stack IPv4/IPv6 ? IPv4 only? Internal IPv6 only with NAT64 to the public stuff? Hopefully IPv6 shows up somewhere in the stack. It's sad to see big players not using it yet. *[tomschlick - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153922)*\n\n> Don't fall into the trap of extending VLANs everywhere. You should definitely be routing (not switching) between different routers.\n>\n> \"Should we have a separate network for Ceph traffic?\" Yes, if you want your Ceph cluster to remain usable during rebuilds. Ceph will peg the internal network during any sort of rebuild event. *[devicenull - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153339)*\n\n## What did the community have to say about Ceph?\n\n> I lead a technical operations team that moved our infrastructure from public cloud (~400 instances) to private cloud (~55 physical servers) and finally, to Kubernetes (6 physical servers). We actually run a mix of Kubernetes and OpenStack, putting apps and services in Kubernetes and all data storage in OpenStack. I've done extensive testing with Ceph and while it adds flexibility, you're not going to be able to touch the I/O performance of bare metal local disks for database use. For storage, I like to keep it simple. I rely on the Linux OS running on standard tried-and-true filesystems (ext4 and ZFS) and build redundancy at the software layer. *[Chris - GitLab blog: Proposed server purchase](/blog/proposed-server-purchase-for-gitlab-com/#comment-3047381500)*\n\n> We had disastrous experiences with Ceph and Gluster on bare metal. I think this says more about the immaturity (and difficulty) of distributed file systems than the cloud per se. *__[codinghorror - Hacker News: Going bare metal](https://news.ycombinator.com/item?id=12940042)__*\n\n> You need to make sure that there is not an architecture that you can build that absolves you of having to run a CephFS cluster. CephFS is cool, but it is a single point of failure right now, and comes with a ton of caveats. 
Performance and stability will be much improved if you remove the layer of abstraction it creates and write your app to handle some sort of distributed storage system. *[Nicholas - GitLab blog: Proposed server purchase](/blog/proposed-server-purchase-for-gitlab-com/#comment-3047478761)*\n\n\u003Cdiv style=\"font-size: 38px; line-height: 1.2; margin: 45px 0 55px; font-style: italic;\">\nBe very very careful about Ceph hype – late2part\n\u003C/div>\n\n> Be very very careful about Ceph hype. Ceph is good at redundancy and throughput, but not at IOPS, and Rados IOPS are poor. We couldn't get over 60k random RW IOPS across a 120 OSD cluster with 120 SSDs. *[late2part - GitLab blog: Proposed server purchase](https://news.ycombinator.com/item?id=13154620)*\n\n> If you're using CephFS and everyone else wants to be using other Cloud storage solutions, that would actually put you at a disconnect with your users and leave room for a competitor with the tools and experience to scale out on Cloud storage to come in offering support. *[Rapzid - Hacker News: Going bare metal](https://news.ycombinator.com/item?id=12946174)*\n\n## How would moving to metal affect the GitLab team?\n\n> Your core competency is code, not infrastructure, so striking out to build all of these new capabilities in your team and organization will come at a cost that you can not predict. Looking at total cost of ownership of cloud vs steel isn't as simple as comparing the hosting costs, hardware and facilities. *[ninjakeyboard - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153779)*\n\n\u003Cdiv style=\"font-size: 38px; line-height: 1.2; margin: 45px 0 55px; font-style: italic;\">\nYour core competency is code, not infrastructure – ninjakeyboard\n\u003C/div>\n\n> Another problem I would say to move to metal is that you lose support. Cloud vendors have entire teams, network, systems, datacenters etc. at your disposal, this is included in the price you are paying. Are you sure you are ready to debug networking issues, systems problems at the level as the cloud vendors? It is a tough job. *[l1x - GitLab blog: Proposed server purchase](/blog/proposed-server-purchase-for-gitlab-com/#comment-3047353138)*\n\n> I think you're under estimating the number of people required to run your own infrastructure. You need people who can configure networking gear, people swapping out failed NICs/Drives at the datacenter, someone managing vendor relationships, and people doing capacity planning. *[thebyrd-on Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13153644)*\n\n## Let’s just abandon x86 altogether\n\n\u003Cdiv style=\"font-size: 38px; line-height: 1.2; margin: 45px 0 55px; font-style: italic;\">\nWhy bind yourself to Intel servers? – MBH\n\u003C/div>\n\n> Why bind yourself to Intel servers? The max CPU-to-Memory bandwidth is 68 GB/s. That's horrible for crunching data fast. IBM's POWER8 systems have servers with 230 GB/s CPU-to-Memory bandwidth, and others with 320 GB/s...\n>\n> ...POWER8 CPUs have a different architecture than Intel: PPC64, so you may need to recompile some things, or have some Intel systems for workloads that can only run on x86_64. *[MBH - GitLab blog: Proposed server purchase](/blog/proposed-server-purchase-for-gitlab-com/#comment-3053432409)*\n\n## We all have an opinion\n\n> I've only ever built desktop machines, and this top comment drew a surprising parallel to most help me with my desktop build type posts. 
Granted, I'm sure as you dig deeper, the reasoning may be much different, but myself being ignorant about a proper server build, it was somehow reassuring to see power and cooling at the top! *[davidbrent - Hacker News: Proposed server purchase](https://news.ycombinator.com/item?id=13154202)*\n\n## We are taking a step back and using a boring solution\n\nWe want to scale intelligently and build great software; we don’t want to be an infrastructure company. We are embracing and are excited about solving the challenge of scaling GitLab.com on the cloud, because solving it for us also solves it for the largest enterprises in the world using GitLab on-premises.\n\nMost of the scaling headaches have occurred because Git is read-heavy: looking at our Git Read/Write performance chart below, you can see that for about every 300 reads we get 10 writes. We tried to solve this by running CephFS in the cloud, but that goes against our value of using the simplest, most [boring solution](/handbook/#values) for a problem.\n\n![An average of 300 Reads to 10 writes](https://about.gitlab.com/images/blogimages/why-we-are-not-leaving-the-cloud-chart.png)\n\n## How are we going to get back to basics?\n\n1. We spread all our storage into [multiple NFS shards](https://gitlab.com/gitlab-com/infrastructure/issues/711) and [dropped CephFS](https://gitlab.com/gitlab-com/infrastructure/issues/817) from our stack.\n2. We created [Gitaly](https://gitlab.com/gitlab-org/gitaly) so that we can stop relying on NFS for horizontal scaling and speed up Git access through caching.\n\n[Gitaly](https://gitlab.com/gitlab-org/gitaly) will serve as the single interface for all our Git access throughout our stack. With Gitaly, the Git RPC travels over the network and the disk is accessed locally, instead of all the disk access going over the network. It will also be used to improve our monitoring of Git resource usage to make better decisions; currently we are only sampling processes.\n\nWe would love for the community to challenge our use of Gitaly with the same passion with which they challenged us before. What do you think of the software architecture? Can a caching layer like this scale? What alarm bells are set off? 
We can’t wait to hear your feedback!\n\nWe would like to thank our community, customers, team and board for all their great support – you all make GitLab an incredible product.\n",{"slug":8366,"featured":6,"template":678},"why-we-are-not-leaving-the-cloud","content:en-us:blog:why-we-are-not-leaving-the-cloud.yml","Why We Are Not Leaving The Cloud","en-us/blog/why-we-are-not-leaving-the-cloud.yml","en-us/blog/why-we-are-not-leaving-the-cloud",{"_path":8372,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8373,"content":8379,"config":8383,"_id":8385,"_type":16,"title":8386,"_source":17,"_file":8387,"_stem":8388,"_extension":20},"/en-us/blog/how-our-ux-team-worked-through-ideation-using-the-four-step-sketch",{"title":8374,"description":8375,"ogTitle":8374,"ogDescription":8375,"noIndex":6,"ogImage":8376,"ogUrl":8377,"ogSiteName":692,"ogType":693,"canonicalUrls":8377,"schema":8378},"How our UX team worked through ideation using the Four-Step Sketch","During our recent Summit, the UX team customized the Google Ventures Design Sprint process to tackle some of our larger feature proposals.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684097/Blog/Hero%20Images/facilitating-ideas--overview.jpg","https://about.gitlab.com/blog/how-our-ux-team-worked-through-ideation-using-the-four-step-sketch","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How our UX team worked through ideation using the Four-Step Sketch\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taurie Davis\"}],\n        \"datePublished\": \"2017-02-23\",\n      }",{"title":8374,"description":8375,"authors":8380,"heroImage":8376,"date":8381,"body":8382,"category":14},[7784],"2017-02-23","\n\nThroughout our [summit in Cancún](/blog/gitlab-mexico-summit-2017/), the UX team took advantage of the time we had together by creating workshops that dived into some larger features we thought would benefit from discussing, brainstorming, and sketching. We chose the idea of a [smart project dashboard](https://gitlab.com/gitlab-org/gitlab-ce/issues/22551) as one of the features to focus on as we worked through the ideation phase.\n\n\u003C!-- more -->\n\nIn one of our two hour workshops, we took an expedited approach to the [Google Ventures Design Sprint](http://www.gv.com/sprint/) process. Their method is focused on answering critical questions through design over the course of five days. Customizing the exercise for our limited time, we used portions of their checklist from [Day 2](https://library.gv.com/sprint-week-tuesday-d22b30f905c3). In this post, I'll share how we set ourselves up for a productive session.\n\n### The problem\n\nOne of our primary goals was to create a tool that reflects the status of a project as it evolves through each step of the idea-to-production lifecycle. Before getting started, we established some basic details regarding the feature that would help provide a structure for discussion:\n\n- Content would be customized to the project, but every user would see the same information.\n- As someone unfamiliar with the project, I can, at a glance, understand the overall status of the project.\n- As someone familiar with the project, I can understand the status of the areas I care about. 
It is also clear which items need my attention.\n\nWe used these as guidelines to start our exploration into what it could mean to transform the project dashboard.\n\n### The process\n\nWith an ambitious problem now in mind, we structured our workshop to focus on the ideation phase using Google Ventures' Four-Step Sketch as inspiration:\n\n- Notes/Ideas: 10 mins\n- Crazy 8s: 8 mins\n- Solution sketch/storyboards: 30 mins\n- Silent critique: 10 mins\n\nWe began with 10 minutes to jot down notes and ideas individually. We used this time to become more familiar with the current state of the dashboard, think about possibilities of where it could go, and reflect on questions that arose as a result. These notes were meant for ourselves, as a way to review and understand the problem.\n\nAfter gathering our thoughts, we went straight into the rapid iterative sketching process: Crazy 8s. This method allowed the team to individually generate a lot of ideas quickly by sketching eight different interactions within an eight-minute time frame. With one minute per frame, we were forced to throw perfection out the window and focus on getting ideas onto paper. Many of us began to feel like we were scraping the bottom of the barrel to come up with new sketches as the minutes passed, but this is often when great solutions begin to arise.\n\n![crazyeights](https://about.gitlab.com/images/blogimages/how-our-ux-team-worked-through-ideation-using-the-four-step-sketch/facilitating-ideas--crazyeights.jpg){: .shadow}\n\nAfter thinking about the problem and potential solutions individually, we began solution-sketching through storyboards. This gave us the chance to further develop the details of a solution we chose through the Crazy 8s. We started with a blank sheet of paper, placed three sticky notes on the page to represent three frames, and spent twenty minutes sketching more detailed wireframes. In the surrounding white space, we named our storyboard and wrote a brief explanation of the idea in order to ensure that the frame was understandable without verbal explanation. This helped us prepare for the next step, our silent critique.\n\n![storyboards](https://about.gitlab.com/images/blogimages/how-our-ux-team-worked-through-ideation-using-the-four-step-sketch/facilitating-ideas--storyboard.jpg){: .shadow}\n\nWe moved to an area in the room that allowed us to hang our storyboards on the wall and gave everyone dot stickers to use as part of the critique. We spent ten minutes looking at all of the solutions and placed stickers on every idea that we liked, focusing only on the positive. There were no rules regarding how many stickers you used. After we had finished, multiple ideas began to stand out among our boards.\n\nWe naturally began discussing all the solutions and adding sticky notes to the wall when we came up with a prominent idea or question to come back to. We spent about 45 minutes doing a group critique and discussing the storyboards in more detail. This also gave us the opportunity to ask questions and get clarification on aspects that may not have been immediately clear. We saw patterns emerge from the different sketches and common ideas became more prominent.\n\n![team](https://about.gitlab.com/images/blogimages/how-our-ux-team-worked-through-ideation-using-the-four-step-sketch/facilitating-ideas--team.jpg){: .shadow}\n\n### Next steps\n\nAt the end of our workshop we had generated a number of ideas for the new project dashboard. 
The Four-Step Sketch was great for opening our minds and allowing us to develop and communicate solutions. You can [view all of our sketches](https://drive.google.com/drive/folders/0B-PqsmU0p5QVMFZCNm4yRFhBblU?usp=sharing), and [contribute to the issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/27112) as we continue to flesh out details surrounding the new dashboard.\n\n*How does your team work together through the ideation phase? Get in touch through the comments below or via taurie@gitlab.com*\n",{"slug":8384,"featured":6,"template":678},"how-our-ux-team-worked-through-ideation-using-the-four-step-sketch","content:en-us:blog:how-our-ux-team-worked-through-ideation-using-the-four-step-sketch.yml","How Our Ux Team Worked Through Ideation Using The Four Step Sketch","en-us/blog/how-our-ux-team-worked-through-ideation-using-the-four-step-sketch.yml","en-us/blog/how-our-ux-team-worked-through-ideation-using-the-four-step-sketch",{"_path":8390,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8391,"content":8397,"config":8402,"_id":8404,"_type":16,"title":8405,"_source":17,"_file":8406,"_stem":8407,"_extension":20},"/en-us/blog/setting-up-gitlab-pages-with-cloudflare-certificates",{"title":8392,"description":8393,"ogTitle":8392,"ogDescription":8393,"noIndex":6,"ogImage":8394,"ogUrl":8395,"ogSiteName":692,"ogType":693,"canonicalUrls":8395,"schema":8396},"Setting up GitLab Pages with Cloudflare Certificates","How to set up GitLab Pages with a Cloudflare SSL/TLS Certificate for your (sub)domain","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671091/Blog/Hero%20Images/lock.jpg","https://about.gitlab.com/blog/setting-up-gitlab-pages-with-cloudflare-certificates","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Setting up GitLab Pages with Cloudflare Certificates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2017-02-07\",\n      }",{"title":8392,"description":8393,"authors":8398,"heroImage":8394,"date":8400,"body":8401,"category":14},[8399],"Marcia Ramos","2017-02-07","[Cloudflare SSL/TLS certificates][cert] are free to use.\nIf you want your [GitLab Pages] site to work with them,\nit's as simple as could be. There's just a **trick**\nyou might not know about (yet)!\n\nWe assume you're familiar with SSL/TLS, DNS, GitLab Pages,\nand Cloudflare.\n\n## Introduction\n\nWith [GitLab Pages], you can [deploy a static website][pages-setup-post]\nwith custom domains/subdomains and SSL/TLS support.\n\nThis tutorial responds to the issue\n\"[Support Cloudflare CA please!](https://gitlab.com/pages/pages.gitlab.io/issues/29)\",\nand other cases where GitLab users asked specifically how\nto add a Cloudflare certificate to GitLab Pages. 
😉 Anything else\nis outside the scope of this post.\n\nIf you don't know how to set up your GitLab Pages site, or why you\nshould care about SSL/TLS:\n\n- Take a look at the [GitLab Pages] overview\n- Read the step-by-step tutorial [Hosting on GitLab.com with GitLab Pages][pages-setup-post]\n- Read the series \"**Static Site Generators** (SSGs)\":\n  - [SSGs Part 1: Static vs Dynamic Websites][ssg-1]\n  - [SSGs Part 2: Modern Static Site Generators][ssg-2]\n  - [SSGs Part 3: Build any SSG site with GitLab Pages][ssg-3]\n- Read the [documentation on GitLab Pages][pages-docs]\n- Read an [overview on the importance of HTTPS][post-startssl]\n\nIn case you don't know about it yet, we're bringing [GitLab Pages to GitLab Community Edition][pages-ce]! 🎉\n\n## Step-by-step quick guide\n\nTo create this step-by-step guide, I'll use my subdomain\n`https://cloudflare.marcia.ml` as an alias for the website\noriginally deployed to \u003Chttps://gitlab-tests.gitlab.io/jekyll>.\n\nThe codebase is a simple [Jekyll] site built with its default\ntheme, available at \u003Chttps://gitlab.com/gitlab-tests/jekyll>.\n\n### STEP 1. DNS record\n\nOn Cloudflare, navigate to the tab **DNS** and create a new\nDNS record ([`CNAME`][cname] or [`A`][a]) pointing your\nsubdomain (`CNAME`) or root domain (`A`)\n[to your GitLab Pages site][pages-setup-domains].\n\nThe image below shows both `CNAME` and `A` records\n(for the purposes of this demo), but of course, you will only\nneed one of them. For this example, I used the `CNAME` record pointing my subdomain `cloudflare.marcia.ml` to `gitlab-tests.gitlab.io`:\n\n![set up Cloudflare DNS](https://about.gitlab.com/images/blogimages/setting-up-gitlab-pages-with-cloudflare-certificates/cloudflare-dns-settings.jpg)\n\nFor projects on GitLab.com, the DNS `A` record should point your custom\ndomain to [GitLab Pages][pages-ip]' server IP address `35.185.44.232`.\n\n**Note:** This GitLab Pages IP address for GitLab.com changed from `52.167.214.135` to `35.185.44.232` in August 2018.\n\nIf you've already set this up, just jump to the second step.\n\n### STEP 2. Generate your certificate\n\n1. Navigate to the tab **Crypto**.\n1. Generate your certificate:\n\n    ![generate certificate](https://about.gitlab.com/images/blogimages/setting-up-gitlab-pages-with-cloudflare-certificates/generate-certificate.png)\n\n1. Choose the domain, subdomain, or wildcard to apply the cert to, then click **Next**:\n\n    ![choose-domain](https://about.gitlab.com/images/blogimages/setting-up-gitlab-pages-with-cloudflare-certificates/certificate-setup.png)\n\n1. Your certificate and private key have been generated. Leave the tab and the modal window open:\n\n    ![modal with certificate](https://about.gitlab.com/images/blogimages/setting-up-gitlab-pages-with-cloudflare-certificates/modal-window.png)\n\n### STEP 3. Add the custom (sub)domain and the certificate to your GitLab Pages project\n\nFrom a new tab, go to GitLab, and navigate to your project's **Settings** > **Pages** > **+ New Domain**:\n\n![configure GitLab Pages](https://about.gitlab.com/images/blogimages/setting-up-gitlab-pages-with-cloudflare-certificates/configure-gitlab-pages.png)\n\nCopy the PEM certificate and the private key from the tab you've\nleft open on Cloudflare, and paste them into their respective fields in GitLab:\n\n![Add PEM certificate to Pages](https://about.gitlab.com/images/blogimages/setting-up-gitlab-pages-with-cloudflare-certificates/add-certificate-to-pages.png)\n\n### STEP 4. 
The trick\n\nCloudflare doesn't combine both PEM and root certificates in one,\nso we need to copy the root certificate (aka \"intermediate\")\n**[Cloudflare Origin CA — RSA Root][root]** from the code block\nbelow, and **paste it below your certificate (PEM)** just added to GitLab:\n\nCopy Cloudflare's Origin CA — RSA Root:\n\n```\n-----BEGIN CERTIFICATE-----\nMIIEADCCAuigAwIBAgIID+rOSdTGfGcwDQYJKoZIhvcNAQELBQAwgYsxCzAJBgNV\nBAYTAlVTMRkwFwYDVQQKExBDbG91ZEZsYXJlLCBJbmMuMTQwMgYDVQQLEytDbG91\nZEZsYXJlIE9yaWdpbiBTU0wgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MRYwFAYDVQQH\nEw1TYW4gRnJhbmNpc2NvMRMwEQYDVQQIEwpDYWxpZm9ybmlhMB4XDTE5MDgyMzIx\nMDgwMFoXDTI5MDgxNTE3MDAwMFowgYsxCzAJBgNVBAYTAlVTMRkwFwYDVQQKExBD\nbG91ZEZsYXJlLCBJbmMuMTQwMgYDVQQLEytDbG91ZEZsYXJlIE9yaWdpbiBTU0wg\nQ2VydGlmaWNhdGUgQXV0aG9yaXR5MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRMw\nEQYDVQQIEwpDYWxpZm9ybmlhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC\nAQEAwEiVZ/UoQpHmFsHvk5isBxRehukP8DG9JhFev3WZtG76WoTthvLJFRKFCHXm\nV6Z5/66Z4S09mgsUuFwvJzMnE6Ej6yIsYNCb9r9QORa8BdhrkNn6kdTly3mdnykb\nOomnwbUfLlExVgNdlP0XoRoeMwbQ4598foiHblO2B/LKuNfJzAMfS7oZe34b+vLB\nyrP/1bgCSLdc1AxQc1AC0EsQQhgcyTJNgnG4va1c7ogPlwKyhbDyZ4e59N5lbYPJ\nSmXI/cAe3jXj1FBLJZkwnoDKe0v13xeF+nF32smSH0qB7aJX2tBMW4TWtFPmzs5I\nlwrFSySWAdwYdgxw180yKU0dvwIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYD\nVR0TAQH/BAgwBgEB/wIBAjAdBgNVHQ4EFgQUJOhTV118NECHqeuU27rhFnj8KaQw\nHwYDVR0jBBgwFoAUJOhTV118NECHqeuU27rhFnj8KaQwDQYJKoZIhvcNAQELBQAD\nggEBAHwOf9Ur1l0Ar5vFE6PNrZWrDfQIMyEfdgSKofCdTckbqXNTiXdgbHs+TWoQ\nwAB0pfJDAHJDXOTCWRyTeXOseeOi5Btj5CnEuw3P0oXqdqevM1/+uWp0CM35zgZ8\nVD4aITxity0djzE6Qnx3Syzz+ZkoBgTnNum7d9A66/V636x4vTeqbZFBr9erJzgz\nhhurjcoacvRNhnjtDRM0dPeiCJ50CP3wEYuvUzDHUaowOsnLCjQIkWbR7Ni6KEIk\nMOz2U0OBSif3FTkhCgZWQKOOLo1P42jHC3ssUZAtVNXrCk3fw9/E15k8NPkBazZ6\n0iykLhH1trywrKRMVw67F44IE8Y=\n-----END CERTIFICATE-----\n```\n\nPaste it below your PEM certificate (jump a line between the\nlast row of your cert `-----END CERTIFICATE-----` and the\nfirst row of the intermediate cert `-----BEGIN CERTIFICATE-----`):\n\n![Add intermediate certificate](https://about.gitlab.com/images/blogimages/setting-up-gitlab-pages-with-cloudflare-certificates/add-intermediate-certificate.png){:.shadow}\n\n### STEP 5. Apply the changes\n\n1. Click **Create New Domain**.\n1. Ta-da! 🎉\n\n    ![Screen_Shot_2016-12-21_at_13.52.02](https://about.gitlab.com/images/blogimages/setting-up-gitlab-pages-with-cloudflare-certificates/certificate-added.png){:.shadow}\n\nIt works fine with the encryption set to _\"Full\"_ or _\"Full (strict)\"_ on Cloudflare:\n\n![Set Cloudflare SSL to full strict](https://about.gitlab.com/images/blogimages/setting-up-gitlab-pages-with-cloudflare-certificates/cloudflare-settings-ssl-strict.png)\n\n## Wrap up\n\nThat's it, now your site runs on HTTPS with a custom domain\nand a free Cloudflare certificate, valid up to 15 years!\n\nComments, questions, suggestions? Please comment below or tweet [@GitLab]! 
😀\n\n----\n\n[Cover image] by [Rita Morais], licensed under [CC0 1.0][cc].\n\n\u003C!-- identifiers -->\n\n[@GitLab]: https://twitter.com/gitlab\n[a]: https://support.dnsimple.com/articles/a-record/\n[cc]: https://unsplash.com/license\n[cert]: https://www.cloudflare.com/ssl/\n[cloudflare]: https://www.cloudflare.com\n[cname]: https://en.wikipedia.org/wiki/CNAME_record\n[Cover image]: https://unsplash.com/collections/427463/locks?photo=q6vBEPqsojc\n[GitLab Pages]: https://pages.gitlab.io\n[Jekyll]: https://jekyllrb.com/\n[pages-ce]: /releases/2016/12/24/were-bringing-gitlab-pages-to-community-edition/\n[pages-docs]: http://doc.gitlab.com/ee/pages/README.html#getting-started-with-gitlab-pages\n[pages-ip]: https://docs.gitlab.com/ee/user/gitlab_com/#gitlab-pages\n[pages-setup-domains]: /blog/gitlab-pages-setup/#custom-domains\n[pages-setup-post]: /blog/gitlab-pages-setup/\n[post-startssl]: /2016/06/24/secure-gitlab-pages-with-startssl/#https-a-quick-overview\n[Rita Morais]: https://unsplash.com/@moraisr\n[root]: https://support.cloudflare.com/hc/en-us/articles/218689638\n[ssg-1]: /blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static/\n[ssg-2]: /blog/ssg-overview-gitlab-pages-part-2/\n[ssg-3]: /blog/ssg-overview-gitlab-pages-part-3-examples-ci/\n[static site]: /blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static/",{"slug":8403,"featured":6,"template":678},"setting-up-gitlab-pages-with-cloudflare-certificates","content:en-us:blog:setting-up-gitlab-pages-with-cloudflare-certificates.yml","Setting Up Gitlab Pages With Cloudflare Certificates","en-us/blog/setting-up-gitlab-pages-with-cloudflare-certificates.yml","en-us/blog/setting-up-gitlab-pages-with-cloudflare-certificates",{"_path":8409,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8410,"content":8416,"config":8420,"_id":8422,"_type":16,"title":8423,"_source":17,"_file":8424,"_stem":8425,"_extension":20},"/en-us/blog/vue-big-plan",{"title":8411,"description":8412,"ogTitle":8411,"ogDescription":8412,"noIndex":6,"ogImage":8413,"ogUrl":8414,"ogSiteName":692,"ogType":693,"canonicalUrls":8414,"schema":8415},"Our big Frontend plan revealed","Our long term plan to make GitLab as fast and performant as possible with Vue and webpack.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683983/Blog/Hero%20Images/vue-big-plan-cover.png","https://about.gitlab.com/blog/vue-big-plan","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Our big Frontend plan revealed\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2017-02-06\",\n      }",{"title":8411,"description":8412,"authors":8417,"heroImage":8413,"date":8418,"body":8419,"category":14},[6995],"2017-02-06","\n\nThe Frontend at GitLab is getting better and better every day. Today we did two big things, and I'd like to share them with you and our big plans for the future.\n\n\u003C!--more-->\n\n\u003Cblockquote style=\"color: red\">\n\u003Cul>\n  \u003Cli>If you use the GDK, then make sure you update it! 
If you have no idea what I am talking about, then just keep reading.\u003C/li>\n  \u003Cli>Please see the \u003Ca href='https://gitlab.com/gitlab-org/gitlab-development-kit/blob/master/doc/update-gdk.md'>documentation\u003C/a> for instructions on updating your GDK.\u003C/li>\n  \u003Cli>Please see our \u003Ca href='https://gitlab.com/gitlab-org/gitlab-development-kit/blob/master/doc/howto/troubleshooting.md#webpack'>troubleshooting guide\u003C/a> for any issues when updating your GDK.\u003C/li>\n  \u003Cli>Feel free to \u003Ca href='https://gitlab.com/gitlab-org/gitlab-ce/issues/new'>report\u003C/a> any additional issues you find.\u003C/li>\n\u003C/ul>\n\u003C/blockquote>\n\n## Our big Frontend plan\n\n[Vue](https://vuejs.org/) is awesome. I wrote an article a while ago that showed [GitLab's love for Vue](/blog/why-we-chose-vue/). Today's article is a way to show our plan over the long term to make GitLab as fast and performant as possible with Vue and webpack. We want to make GitLab the easiest platform for Frontend Developers to develop for.\n\nOne of the lessons I live by is \"It's not _always_ about the tools you use, but **how** you use them.\" Saying \"we chose Vue\" does not imply success. This also means that we could be using Angular or React and have just as awesome of a product. Vue is simply the way there.\n\nHow do we plan to use Vue over the long run to make GitLab better, faster, easier and more awesome?\n\nThe plan below is a work in progress and very ambitious, but I believe that it will result in a much better frontend for development and performance. This document is also a reference for myself of the things we plan to do here at GitLab's Frontend.\n\n## A healthier Frontend\n\nWhen I started at GitLab, our stack was (oversimplifying here) Rails with jQuery. Big picture, it hasn't changed much except for Vue. Smaller picture, we've added many linters, better code coverage, and many other great things.\n\n### 1. Rewrite only what you need to\n\nWe are not rewriting GitLab's frontend entirely in Vue. That would be a very bad idea. It's not a bad idea for everyone, but it's a bad idea for a startup. It would cost a tremendous amount of time and money. The existing jQuery code (although some say it is uncool) has been tested and works very well. There is no need to rewrite functionality that works well, unless there is going to be a major gain.\n\nWe also aren't writing every new thing in Vue. You do not need to do this either. But, it would be hard to find some part of the UI that would not benefit from even the simplest parts of Vue.\n\nExamples of this are:\n\n1. The issue page (which shows an individual issue) has a lot of jQuery on it. We won't rewrite it now, because it works well. We will rewrite small parts in Vue once we make certain features more real-time. We are currently making the title and description real time.\n\n1. The [Issue Boards](/stages-devops-lifecycle/issueboard/), which [Phil](https://twitter.com/iamphill) wrote, was a perfect candidate for Vue. It was a brand new feature and had lots of reactive parts.\n\n1. The current issue page loads all comments at once and adds lots of event listeners to the page. This page could benefit from Vue for performance reasons. We could make the comment section a Vue app and make each comment a component, with the emoji picker as a component as well, etc. While we're in there, we'll amp up the UX by allowing you to see the comment you linked to immediately without waiting. 
There are better ways to show massive amounts of comments, so we may have to rethink that.\n\n1. The pipelines page was rewritten in Vue for the arrival of real-time updating.\n\n1. The environments page was written in Vue.\n\n1. There are many other places where we will be using Vue in the future and where we are already using Vue. Too many to list here.\n\nAs you can see, we won't just slap Vue on everything.\n\n### 2. Add in webpack\n\nRails has this awesome system of grabbing your Ruby libraries and bundling them into your app. `bundle install` will install all the stuff you need from your `Gemfile`. So why does Frontend have to stick all their libraries in the `vendor` directory? Are we not on point enough to have our own library delivery system? The Javascript ecosystem has matured since the asset pipeline first arrived, and we now have `npm install` and advanced code bundling tools that we can take advantage of.\n\nBy [introducing webpack into the equation (merged and ready for action!)](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/7288) we gain multiple benefits.\n\n1. Javascript libraries aren't being bundled directly with the [GitLab source code](/solutions/source-code-management/) or included within gem wrappers. For example, `jquery-ui` and `bootstrap-rails` are included as Ruby gems, and we are at the mercy of the gem maintainer to keep the Javascript library up to date.\n1. When code is shared between files, we can make sure we don't load [lodash](https://lodash.com/) twice, for example. If both files load lodash, we should only load the code for lodash once. Not only will lodash not be included twice, but with [tree shaking](https://webpack.js.org/guides/tree-shaking/) only the components of lodash that we use will be included rather than the whole library.\n1. We can add [hot module replacement](https://webpack.js.org/concepts/hot-module-replacement/) to make our Vue development quicker. This is a development bonus, as our current development takes loads of time to refresh the page while developing GitLab. Spicy!\n1. We can now manage our dependencies properly. This should help a lot of frontenders to contribute to GitLab. Devs won't need to figure out the whole Rails Javascript situation in order to contribute. We can also dictate manually what we want to include.\n1. SVGs are going to be huge.\n    1. [webpack](https://webpack.js.org/) bundles SVGs directly into our Javascript.\n    1. Right now, SVGs live in a specific directory in Rails. We use Rails helpers to pull in SVGs. With webpack we can pull in SVGs one at a time because webpack precompiles assets.\n    1. We won't have to fetch SVGs with an HTTP request.\n    1. We don't have to do tricky HTML hidden elements, which is technical debt.\n    1. We don't have to mess around with SVGs in CSS. You cannot change the color of SVGs in CSS.\n1. We use a lot of Ruby to solve Javascript and CSS problems. Now we can solve those problems on our own using only frontend tools.\n1. Using webpack's [CommonsChunkPlugin](https://webpack.js.org/plugins/commons-chunk-plugin/) we split all of our common vendor libraries into their own separate file. Since these change very infrequently, they can stay cached for a much longer period of time.\n1. With webpack's [code splitting](https://webpack.js.org/guides/code-splitting/) feature you can load just the JS you need to boot. Then you do a `require.ensure()` or `System.import()`. With this, we can tell webpack to request only the exact JS you need. It keeps the size of the file really small. 
For example, suppose you have `modal.js` for modals. If someone never uses the modals, the code never loads. As soon as someone opens a modal, the JS gets loaded on demand.\n1. We can now properly manage our global scope. We can now do an `import x from y` instead of having our scripts pollute the global scope and pass classes around on `window.gl.lol`.\n1. We can slim down our Vue bundles because we can precompile templates and omit the template compiler from our production code. [Evan You](https://twitter.com/youyuxi) (the creator of VueJS) explains this in the [feature overview for Vue 2.0](https://github.com/vuejs/vue/issues/2873):\n  > There will be two different builds:\n  > - Standalone build: includes both the compiler and the runtime. ...\n  > - Runtime only build: since it doesn't include the compiler, you need to either pre-compiled templates in a compile step, or manually written render functions.\n\n### 3. Remove Turbolinks\n\nWe used [Turbolinks](https://github.com/turbolinks/turbolinks) in GitLab, but we've recently [removed it](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8570) in a merge request merged on 2017/02/03.\n\n#### What does Turbolinks achieve?\n\nWith Turbolinks, clicking a link won't navigate to a new page in the default browser `GET` request way. Instead, Turbolinks will replace the `body` tag of your app with the new content. All your Javascript is loaded one time, when using the asset pipeline. This usually only loads some small HTML and JavaScript. On GitLab, our pages would load an average of 20kb on each page load versus the full JavaScript file size of 800kb+. Turbolinks is a great solution for many projects. When you start introducing slightly more complex Javascript, it becomes a pain.\nWe did speed tests on pages with Turbolinks and without Turbolinks, and we found that the pages without Turbolinks performed better. We discovered that Turbolinks works well when you don't have a lot of event listeners to manage. To add to this, we will be able to make our pages even faster in the future because we will divide the Javascript up between pages better with the help of webpack. We were previously writing a lot of extra code to handle all of Turbolinks' problems, and we can remove that code now.\n\n#### The problem we need to solve\n\nWhen your JS is loaded one time for multiple pages, events become a major problem. If you are using `gem 'jquery-turbolinks'` as we are, then the `$` `ready` function will fire on every page load even though the page isn't loading in the traditional sense. It's painful to write page-specific Javascript without including it for the whole app. We do it and it's fine, but why? There really isn't a reason for a lot of our JS that needs to be included on every page.\n\nAny external links do load faster, so we need to be careful about performance.\n\nIf you aren't careful, your events will get bound multiple times and thus fire multiple times. For example, take the following code:\n\n```js\n$(function(){\n  $(document).on('click', '.some-element', function(){\n    console.log('Click loaded');\n  });\n});\n```\n\nThat click handler will be bound again on every page load and thus fire multiple times every time `.some-element` is clicked.\n\n#### The Solutions\n\nThere are a few remedies to this problem. Some are good and some are bad.\n\n1. Don't create events in `$` `ready` callbacks.\n2. 
\n\n### 3. Remove Turbolinks\n\nWe used [Turbolinks](https://github.com/turbolinks/turbolinks) in GitLab, but we recently [removed it](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8570) in a merge request that was merged on 2017/02/03.\n\n#### What does Turbolinks achieve?\n\nWith Turbolinks, clicking a link won't navigate to a new page in the default browser `GET` request way. Instead, Turbolinks will replace the `body` tag of your app with the new content, and all your JavaScript is loaded just once via the asset pipeline. Each navigation then usually only loads some small HTML. On GitLab, our pages would load an average of 20 KB on each page load versus the full JavaScript file size of 800 KB+. Turbolinks is a great solution for many projects, but when you start introducing slightly more complex JavaScript it becomes a pain.\nWe did speed tests on pages with Turbolinks and without Turbolinks and we found that the pages without Turbolinks performed better. We discovered that Turbolinks works well when you don't have a lot of event listeners to manage. To add to this, we will be able to make our pages even faster in the future because we will divide the JavaScript up between pages better with the help of webpack. We were previously writing a lot of extra code to handle all of Turbolinks' problems, and we can now remove that code.\n\n#### The problem we need to solve\n\nWhen your JS is loaded one time for multiple pages, events become a major problem. If you are using `gem 'jquery-turbolinks'` as we are, then the jQuery `ready` function will fire on every page load even though the page isn't loading in the traditional sense. It's painful to write page-specific JavaScript without including it for the whole app. We do it and it's fine, but why? There really isn't a reason for a lot of our JS to be included on every page.\n\nPages do load faster with Turbolinks, so we need to be careful not to lose that performance.\n\nIf you aren't careful, your events will get bound multiple times and thus fire multiple times. For example, take the following code:\n\n```js\n$(function(){\n  $(document).on('click', '.some-element', function(){\n    console.log('Click loaded');\n  });\n});\n```\n\nThat click handler will be bound again on every page load and thus fire multiple times every time `.some-element` is clicked.\n\n#### The Solutions\nThere are a few remedies to this problem. Some are good and some are bad.\n\n1. Don't create events in `$` `ready` callbacks.\n2. Use the following stinky solution:\n\n    ```js\n    $(document)\n      .off('click', '.some-element')\n      .on('click', '.some-element', function () {\n        console.log('Click loaded');\n      });\n    ```\n\n     I call this the `die live` method. Old jQuery coders used to write `die().live()` everywhere. That's the old-school jQuery `off().on()`.\n3. Write an event manager to be a delegate for all added events.\n4. [Remove Turbolinks](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8570) and make sure you load only the code you need on each page.\n\nI am opting for option 4 in order to make our development lives easier and get multiple performance gains.\n\n#### The Bonus\n\nAfter we remove Turbolinks we can do something really cool. We can have each page live on its own. Then, certain pages can be their own Vue apps. For example, we can make the file browser its own Vue application. The merge request page can be its own application. The code for the file viewer won't need to be loaded on any other page, and the same goes for other pages. None of this is new; it's just basic web development, and we would not be the first to do it.\n\n## Conclusion\nThere is the argument for making the whole site a single-page application, but I think that would be the hardest to maintain and would bring zero benefit to performance or the user. Also, there's a higher chance of making GitLab a janky app. For example, the profile page could potentially be very light; if someone follows a link directly to the profile page, there is no reason for it to load every single piece of JavaScript in our project.\n\nThis is just one small step for GitLab and one giant leap for the frontend team. In the future you will see many new cool things coming from our team. This move was one step in that direction.\n\nQuestions, suggestions, ideas? Please leave a comment\nbelow or tweet at us [@GitLab](https://twitter.com/gitlab)!\n",{"slug":8421,"featured":6,"template":678},"vue-big-plan","content:en-us:blog:vue-big-plan.yml","Vue Big Plan","en-us/blog/vue-big-plan.yml","en-us/blog/vue-big-plan",{"_path":8427,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8428,"content":8433,"config":8437,"_id":8439,"_type":16,"title":8440,"_source":17,"_file":8441,"_stem":8442,"_extension":20},"/en-us/blog/gitlab-dot-com-database-incident",{"title":8429,"description":8430,"ogTitle":8429,"ogDescription":8430,"noIndex":6,"ogImage":2478,"ogUrl":8431,"ogSiteName":692,"ogType":693,"canonicalUrls":8431,"schema":8432},"GitLab.com database incident","Yesterday we had a serious incident with one of our databases. We lost six hours of database data (issues, merge requests, users, comments, snippets, etc.) for GitLab.com.","https://about.gitlab.com/blog/gitlab-dot-com-database-incident","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab.com database incident\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2017-02-01\",\n      }",{"title":8429,"description":8430,"authors":8434,"heroImage":2478,"date":8435,"body":8436,"category":14},[890],"2017-02-01","\n\nUpdate: please see [our postmortem for this incident](/blog/postmortem-of-database-outage-of-january-31/)\n\nYesterday we had a serious incident with one of our databases. We lost six hours of database data (issues, merge requests, users, comments, snippets, etc.) for GitLab.com. Git/wiki repositories and self-managed installations were not affected. 
Losing production data is unacceptable, and in a few days we'll publish a post on why this happened and a list of measures we will implement to prevent it happening again.\n\n_**Update 6:14pm UTC: GitLab.com is back online**_\n\n\u003C!-- more -->\n\nAs of the time of writing, we’re restoring data from a six-hour-old backup of our database. This means that any data between 5:20pm UTC and 11:25pm UTC from the database (projects, issues, merge requests, users, comments, snippets, etc.) is lost by the time GitLab.com is live again.\n\n**Git data (repositories and wikis) and self-managed instances of GitLab are not affected.**\n\nRead below for a brief summary of the events. You’re also welcome to view [our active postmortem doc](https://docs.google.com/document/d/1GCK53YDcBWQveod9kfzW-VCxIABGiryG7_z_6jHdVik/pub).\n\n## First incident\n\n\nAt 2017/01/31 6pm UTC, we detected that spammers were hammering the database by creating snippets, making it unstable. We then started troubleshooting to understand what the problem was and how to fight it.\n\n\n\n![](https://about.gitlab.com/db_incident/snippets.png)\n\nAt 2017/01/31 9pm UTC, this escalated, causing a lockup on writes on the database, which caused some downtime.\n\n![](https://about.gitlab.com/db_incident/locks.png)\n\n\n\n### Actions taken\n\n- We blocked the spammers based on IP address\n- We removed a user for using a repository as some form of CDN, resulting in 47 000 IPs signing in using the same account (causing high DB load)\n- We removed users for spamming (by creating snippets)\n\n## Second incident\n\nAt 2017/01/31 10pm UTC, we got paged because DB replication lagged too far behind, effectively stopping. This happened because there was a spike in writes that were not processed in time by the secondary database.\n\n![](https://about.gitlab.com/db_incident/used.png)\n\n\n![](https://about.gitlab.com/db_incident/rep_lag.png)\n\n### Actions taken\n\n- Attempt to fix `db2`, it’s lagging behind by about 4 GB at this point\n- `db2.cluster` refuses to replicate, `/var/opt/gitlab/postgresql/data` is wiped to ensure a clean replication\n- `db2.cluster` refuses to connect to `db1`, complaining about `max_wal_senders` being too low. This setting is used to limit the number of `WAL (= replication)` clients\n- _Team-member-1_ adjusts `max_wal_senders` to `32` on `db1`, restarts PostgreSQL\n- PostgreSQL complains about too many semaphores being open, refusing to start\n- _Team-member-1_ adjusts `max_connections` to `2000` from `8000`, PostgreSQL starts again (despite `8000` having been used for almost a year)\n- `db2.cluster` still refuses to replicate, though it no longer complains about connections; instead it just hangs there not doing anything\n- At this point frustration begins to kick in. Earlier this night _team-member-1_ explicitly mentioned he was going to sign off as it was getting late (23:00 or so local time), but didn’t due to the replication problems popping up all of a sudden.\n\n## Third incident\n\nAt 2017/01/31 11pm-ish UTC, _team-member-1_ thinks that perhaps `pg_basebackup` is refusing to work due to the PostgreSQL data directory being present (despite being empty), and decides to remove the directory. After a second or two he notices he ran it on `db1.cluster.gitlab.com` instead of `db2.cluster.gitlab.com`.\n\nAt 2017/01/31 11:27pm UTC, _team-member-1_ terminates the removal, but it’s too late. 
Of around 300 GB, only about 4.5 GB is left.\n\nWe had to bring GitLab.com down and shared this information on Twitter:\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We are performing emergency database maintenance, \u003Ca href=\"https://t.co/r11UmmDLDE\">https://t.co/r11UmmDLDE\u003C/a> will be taken offline\u003C/p>&mdash; GitLab.com Status (@gitlabstatus) \u003Ca href=\"https://twitter.com/gitlabstatus/status/826572933304827904\">January 31, 2017\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"//platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n## Problems encountered\n\n- LVM snapshots are by default only taken once every 24 hours. _Team-member-1_ happened to run one manually about six hours prior to the outage because he was working on load balancing for the database.\n- Regular backups seem to also only be taken once per 24 hours, though _team-member-1_ has not yet been able to figure out where they are stored. According to _team-member-2_ these don’t appear to be working, producing files only a few bytes in size.\n- _Team-member-3_: It looks like `pg_dump` may be failing because PostgreSQL 9.2 binaries are being run instead of 9.6 binaries. This happens because omnibus only uses Pg 9.6 if `data/PG_VERSION` is set to 9.6, but on workers this file does not exist. As a result it defaults to 9.2, failing silently. No SQL dumps were made as a result. The Fog gem may have cleaned out older backups.\n- Disk snapshots in Azure are enabled for the NFS server, but not for the DB servers.\n- The synchronisation process removes webhooks once it has synchronised data to staging. Unless we can pull these from a regular backup from the past 24 hours, they will be lost.\n- The replication procedure is super fragile, prone to error, relies on a handful of random shell scripts, and is badly documented.\n- Our backups to S3 apparently don’t work either: the bucket is empty.\n- So in other words, out of five backup/replication techniques deployed, none were working reliably or even set up in the first place. We ended up restoring a six-hour-old backup.\n- `pg_basebackup` will silently wait for a master to initiate the replication process; according to another production engineer this can take up to 10 minutes. This can lead to one thinking the process is stuck somehow. Running the process using “strace” provided no useful information about what might be going on.\n\n## Recovery\n\n\nWe’re working on recovering right now by using a backup of the database from a staging database.\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We accidentally deleted production data and might have to restore from backup. 
Google Doc with live notes \u003Ca href=\"https://t.co/EVRbHzYlk8\">https://t.co/EVRbHzYlk8\u003C/a>\u003C/p>&mdash; GitLab.com Status (@gitlabstatus) \u003Ca href=\"https://twitter.com/gitlabstatus/status/826591961444384768\">February 1, 2017\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"//platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n- 2017/02/01 00:36 - Backup `db1.staging.gitlab.com` data\n- 2017/02/01 00:55 - Mount `db1.staging.gitlab.com` on `db1.cluster.gitlab.com`\n- Copy data from staging `/var/opt/gitlab/postgresql/data/` to production `/var/opt/gitlab/postgresql/data/`\n- 2017/02/01 01:05 - `nfs-share01` server commandeered as temp storage place in `/var/opt/gitlab/db-meltdown`\n- 2017/02/01 01:18 - Copy of remaining production data, including `pg_xlog` tar’ed up as `20170131-db-meltodwn-backup.tar.gz`\n\n\nBelow is a graph showing the time of deletion and the subsequent copying in of data.\n\n![](https://about.gitlab.com/db_incident/delete.png)\n\n[Also, we'd like to thank everyone for the amazing support we've received on Twitter and elsewhere through #hugops](https://twitter.com/i/moments/826818668948549632)\n",{"slug":8438,"featured":6,"template":678},"gitlab-dot-com-database-incident","content:en-us:blog:gitlab-dot-com-database-incident.yml","Gitlab Dot Com Database Incident","en-us/blog/gitlab-dot-com-database-incident.yml","en-us/blog/gitlab-dot-com-database-incident",{"_path":8444,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8445,"content":8451,"config":8455,"_id":8457,"_type":16,"title":8458,"_source":17,"_file":8459,"_stem":8460,"_extension":20},"/en-us/blog/getting-started-with-git-lfs-tutorial",{"title":8446,"description":8447,"ogTitle":8446,"ogDescription":8447,"noIndex":6,"ogImage":8448,"ogUrl":8449,"ogSiteName":692,"ogType":693,"canonicalUrls":8449,"schema":8450},"Getting started with Git LFS","Managing large files efficiently with Git LFS","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683977/Blog/Hero%20Images/lfs-website.png","https://about.gitlab.com/blog/getting-started-with-git-lfs-tutorial","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with Git LFS\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tobias Günther\"}],\n        \"datePublished\": \"2017-01-30\",\n      }",{"title":8446,"description":8447,"authors":8452,"heroImage":8448,"date":8453,"body":8454,"category":14},[4729],"2017-01-30","\n\nIt happens with the best of intentions: your design team adds their large graphic files to your project repository - and you see it grow and grow until it's a multi-gigabyte clump...\n\n\u003C!--more-->\n\nWorking with large binary files in Git can indeed be tricky.\nEvery time a tiny change in a 100 MB Photoshop file is committed, your repository grows by another 100 MB.\nThis quickly adds up and makes your repository almost unusable due to its enormous size.\n\nBut of course, _not_ using [version control](/topics/version-control/) for your design / concept / movie / audio / executables / &lt;other-large-file-use-case&gt; work _cannot_ be the solution.\nThe general benefits of version control still apply and should be reaped in all kinds of projects.\n\nLuckily, there's a Git extension that makes working with large files a lot more efficient: say hello to \"[Large File Storage](https://git-lfs.github.com/)\" (or simply \"LFS\" if you prefer nicknames).\n\n## Without LFS: Bloated 
repositories\n\nBefore we look at how exactly LFS works its wonders, we'll take a closer look at the actual problem.\nLet's consider a simple website project as an example:\n\n![A simple project setup](https://about.gitlab.com/images/blogimages/getting-started-with-git-lfs-tutorial/project-setup-without-big-files.png){:.shadow}\n\nNothing special: some HTML, CSS, and JS files and a couple of small image assets.\nHowever, until now, we haven't included our design assets (Photoshop, Sketch, etc.).\nIt makes a lot of sense to put your design assets under version control, too.\n\n![Big binary files in a project](https://about.gitlab.com/images/blogimages/getting-started-with-git-lfs-tutorial/project-setup-with-big-files.png){:.shadow}\n\nHowever, here's the catch: each time our designer makes a change (no matter how small) to this new Photoshop file, she will commit another 100 MB to the repository.\nVery quickly, the repository will weigh tons of megabytes and soon gigabytes - which makes cloning and managing it very tedious.\n{: .alert .alert-info}\n\nAlthough I only talked about \"design\" files, this is really a problem with all \"large\" files:\nmovies, audio recordings, datasets, etc.\n\n## With LFS: Efficient large file handling\n\nOf course, LFS cannot simply \"magic away\" all that large data: it accrues with every change and has to be saved.\nHowever, it shifts that burden to the remote server - allowing the _local_ repository to stay relatively lean!\n\nTo make this possible, LFS uses a simple trick: **it does not keep all of a file's versions in the local repository**.\nInstead, it provides only the files that are necessary in the checked-out revision, on demand.\n\nBut this poses an interesting question: if those huge files themselves are _not_ present in your local repository... what _is_ present instead?\n[LFS saves lightweight pointers](https://www.git-tower.com/learn/git/ebook/en/desktop-gui/advanced-topics/git-lfs?utm_source=gitlab-blog&utm_campaign=GitLab%20LFS&utm_medium=guest-post) in place of real file data. When you check out a revision with such a pointer, LFS simply looks up the original file (possibly on the server if it's not in its own, special cache) and downloads it for you.\n\nThereby, you end up with only the files you really want - not a whole bunch of superfluous data that you might never need.
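\n\nTo make that concrete, here is roughly what such a pointer file looks like when you open it in a text editor (the hash and size below are illustrative):\n\n```\nversion https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 104857600\n```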
\n\n## Installing LFS\n\nLFS is not (yet) part of the core Git binary, but it's available as an extension.\nThis means that, before we can work with LFS, we need to make sure it's installed.\n\n#### Server\n\nNot all code hosting services support LFS yet. As a GitLab user, however, there's not much to worry about:\nif you're using GitLab.com or a halfway recent version of GitLab CE or EE, [support for LFS is already baked in](https://docs.gitlab.com/ee/topics/git/lfs/index.html)!\nYour administrator only needs to [enable the LFS option](https://docs.gitlab.com/ee/administration/lfs/index.html).\n\n#### Local machine\n\nYour local Git installation also needs to support LFS.\nIf you're using [Tower](https://www.git-tower.com/?utm_source=gitlab-blog&utm_campaign=GitLab%20LFS&utm_medium=guest-post), a Git desktop client, you don't have to install anything: Tower supports Git Large File Storage out of the box.\n\nIf you're using Git on the command line, there are different installation options available to you:\n\n- Binary Packages: Up-to-date [binary packages](https://github.com/git-lfs/git-lfs/releases) are available for Windows, Mac, Linux, and FreeBSD.\n- Linux: Packages for Debian and RPM are available from [PackageCloud](https://packagecloud.io/github/git-lfs/install).\n- macOS: You can use [Homebrew](https://github.com/Homebrew/brew) via \"brew install git-lfs\" or [MacPorts](https://www.macports.org) via \"port install git-lfs\".\n- Windows: You can use the [Chocolatey](https://chocolatey.org/) package manager via \"choco install git-lfs\".\n\nAfter your package manager has finished its work, you need to complete the installation with the \"lfs install\" command:\n\n```\ngit lfs install\n```\n\n## Tracking files with LFS\n\nWithout further instructions, LFS won't take care of your large file problems.\nWe'll have to tell LFS explicitly which files it should handle!\n\nSo let's return to our \"big Photoshop file\" example. We can instruct LFS to take care of the \"design.psd\" file using the \"lfs track\" command:\n\n```\ngit lfs track \"design-resources/design.psd\"\n```\n\nAt first glance, the command doesn't seem to have much effect. However, you'll notice that a new file in the project's root folder has been created (or changed, if it already existed): `.gitattributes` collects all file patterns that we choose to track via LFS. Let's take a look at its contents:\n\n```\ncat .gitattributes\ndesign-resources/design.psd filter=lfs diff=lfs merge=lfs -text\n```\n\nPerfect! From now on, LFS will handle this file. We can now go ahead and add it to the repository in the way we're used to.\nNotice that any changes to `.gitattributes` also have to be committed to the repository, just like other modifications:\n\n```\ngit add .gitattributes\ngit add design-resources/design.psd\ngit commit -m \"Add design file\"\n```\n\n## Tracking file patterns\n\nAdding a specific, single file like this is all well and good... but what if you want to track, for example, _every_ `.indd` file in our project?\nPlease relax: you don't have to add each file manually! LFS allows you to define file patterns, much like when ignoring files.\nThe following command, for example, will instruct LFS to track all _InDesign_ files - existing ones and future ones:\n\n```\ngit lfs track \"*.indd\"\n```\n\nYou could also tell LFS to track the contents of a whole directory:\n\n```\ngit lfs track \"design-assets/*\"\n```\n\n## Getting an overview of tracked files\n\nAt some point, you might want to know which files exactly are tracked by LFS at the moment.\nYou could simply take a look at the `.gitattributes` file. However, these are not _actual_ files, but only rules and therefore highly \"theoretical\": individual files might have slipped through, e.g. 
due to typos or overly restrictive rules.\n\nTo see a list of the _actual_ files that you're currently tracking, simply use the `git lfs ls-files` command:\n\n```\ngit lfs ls-files\n194dcdb603 * design-resources/design.psd\n```\n\n## Track as early as possible\n\nRemember that LFS does _not_ change the laws of nature: things that were committed to the repository are there to stay.\nIt's very hard (and dangerous) to change a project's commit history.\n\nThis means that you should tell LFS to track a file _before_ it's committed to the repository.\n{: .alert .alert-info}\n\nOtherwise, it has become part of your project's history - including all of its megabytes and gigabytes...\n\nThe ideal moment to configure which file patterns you want to track is right when initializing a repository (just like with [ignoring files](https://www.git-tower.com/learn/git/ebook/en/desktop-gui/basics/starting-with-an-unversioned-project?utm_source=gitlab-blog&utm_campaign=GitLab%20LFS&utm_medium=guest-post#chapter_ignoring+files)).\n\n## Using LFS in a GUI\n\nAlthough LFS is not difficult to use, there are still commands to remember and things to mess up.\nIf you want to be more productive with Git (and LFS), have a look at [Tower](https://www.git-tower.com/?utm_source=gitlab-blog&utm_campaign=GitLab%20LFS&utm_medium=guest-post), a Git desktop client for Mac and Windows.\nSince Tower comes with built-in support for Git LFS, there is nothing to install. The app has been around for several years and is trusted by over 80,000 users all over the world.\n\n![Using Tower to be more productive with Git and Git LFS](https://about.gitlab.com/images/blogimages/getting-started-with-git-lfs-tutorial/tower-lfs.gif)\n\nAdditionally, Tower provides a direct [integration with GitLab](/blog/gitlab-tower-integration-coupon-code/)! 
After connecting your GitLab account in Tower, you can clone and create repositories with just a single click.\n\n## Working with Git\n\nA great aspect of LFS is that you can maintain your normal Git workflow: staging, committing, pushing, pulling, and everything else works just like before.\nApart from the commands we've discussed, there's nothing to watch out for.\n\nLFS will provide the files you need, _when_ you need them.\n\nIn case you're looking for more information about LFS, have a look at this free [online book](https://www.git-tower.com/learn/git/ebook/en/desktop-gui/advanced-topics/git-lfs?utm_source=gitlab-blog&utm_campaign=GitLab%20LFS&utm_medium=guest-post).\nFor general insights about Git, take a look at the [Git Tips & Tricks](/blog/git-tips-and-tricks/) blog post and Tower's [video series](https://www.git-tower.com/learn/git/videos?utm_source=gitlab-blog&utm_campaign=GitLab%20LFS&utm_medium=guest-post).\n\n## About the Guest Author\n\nThis is a [guest post](/handbook/marketing/blog/#guest-posts)\nwritten by [Tobias Günther](https://twitter.com/gntr), who is part of the team behind the [Tower Git client](https://www.git-tower.com/?utm_source=gitlab-blog&utm_campaign=GitLab%20LFS&utm_medium=guest-post).\n\nCover image: screenshot of [Git LFS](https://git-lfs.github.com/)\n{:.note}\n",{"slug":8456,"featured":6,"template":678},"getting-started-with-git-lfs-tutorial","content:en-us:blog:getting-started-with-git-lfs-tutorial.yml","Getting Started With Git Lfs Tutorial","en-us/blog/getting-started-with-git-lfs-tutorial.yml","en-us/blog/getting-started-with-git-lfs-tutorial",{"_path":8462,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8463,"content":8469,"config":8473,"_id":8475,"_type":16,"title":8476,"_source":17,"_file":8477,"_stem":8478,"_extension":20},"/en-us/blog/designing-for-the-modern-developer-recap",{"title":8464,"description":8465,"ogTitle":8464,"ogDescription":8465,"noIndex":6,"ogImage":8466,"ogUrl":8467,"ogSiteName":692,"ogType":693,"canonicalUrls":8467,"schema":8468},"Designing for the modern developer","Recap and recording from our recent webcast featuring the GitLab user experience (UX) team","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683970/Blog/Hero%20Images/designing-for-the-modern-developer.jpg","https://about.gitlab.com/blog/designing-for-the-modern-developer-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Designing for the modern developer\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2017-01-24\",\n      }",{"title":8464,"description":8465,"authors":8470,"heroImage":8466,"date":8471,"body":8472,"category":14},[6728],"2017-01-24","\n\nWe're proud of the work each team within the company delivers to make us better with every monthly release. 
Because GitLab spans the entire software development lifecycle, our UX team routinely tackles a series of unique creative challenges, which they discussed in depth in our recent webcast, \"Designing for the Modern Developer.\"\n\nUX Lead Allison Whilden and UX Researcher Sarah O'Donnell chat about how the team designs the interface, responds to feedback, and helps fit new features into our understanding of the needs of all users.\n\nWatch the recording and get the highlights below.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/nnL48m0m4qo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Highlights\n\n### How we make design decisions\n\n>  At every stage, we carefully think through why decisions are made, and for whom. We have a number of methods that help us do this: research, testing, surveys, working with internal devs, and carrying on a constant conversation with our community.\n\n### Users of our own design\n\n>  We’re pretty unique in that every day, we use our own product. This gives us an intimate understanding of how the software is used. It empowers us as users with opinions, and also forces us to be extra thoughtful about removing bias from our evaluation of the UX. We take seriously that we need to represent all opinions, not just our own. UX is about calculated decisions, not anyone’s idea about what they like best. We naturally adopt a sort of split brain when using the software and observing user behavior--we empathize both as users and technical experts.\n\n### How we work with Engineering and Product\n\n>  Being a remote startup, we have a rather unique challenge of building a shared vision and perspective for UX while we are scattered across the world, and across features of the product. We’re working on this by documenting everything in our [UX Guide](https://docs.gitlab.com/ee/development/ux_guide/index.html), which you’re free to read through. More and more of the industry is becoming remote, so we’re excited to be experimenting with ways to stay connected. We believe that what we learn will be increasingly valuable to other companies. We navigate our relationship with the product team by understanding that they are both users & builders, too.\n\n### We're sensitive to different needs and work styles\n\n>  We’re opinionated - we have a GitLab workflow, which we use at GitLab, the company, but we understand that not everyone works the way we do. Some of our company values, like extreme transparency, need to be made “optional” for users in other companies, for example when we think about how permissions work.\nWe also build things that we ourselves don’t currently need.\n\n### How we balance feedback\n\n>  In order to keep up with releases and deliver minimum viable change, we need to amplify some voices. When we have to prioritize, we address issues from our Enterprise customers first, then from our outside community, and then from developers on our team.\n\n### Our UX research interests\n\n>  We want to build an understanding of who our users are and how they work. While we get a lot of great feedback from the community that is really helpful, we aren't always sure we are capturing the full range of our target users. We're developing personas that will allow us to relate to different types of users and subsequently predict their behavior. 
This is essential for our key challenge of creating a unified experience for teams big and small, so they can stay focused on their own goals.\n\nYou can learn more about this in our recent [blog post](/blog/the-importance-of-ux-personas/)!\n\n\nCover image: \"[sampa](https://www.flickr.com/photos/hernaniarruda/16024464453)\" by [Hernani Arruda Monteiro da Silva](https://www.flickr.com/photos/hernaniarruda/) is licensed under [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/legalcode).\n{:.note}\n",{"slug":8474,"featured":6,"template":678},"designing-for-the-modern-developer-recap","content:en-us:blog:designing-for-the-modern-developer-recap.yml","Designing For The Modern Developer Recap","en-us/blog/designing-for-the-modern-developer-recap.yml","en-us/blog/designing-for-the-modern-developer-recap",{"_path":8480,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8481,"content":8487,"config":8491,"_id":8493,"_type":16,"title":8494,"_source":17,"_file":8495,"_stem":8496,"_extension":20},"/en-us/blog/video-tutorial-idea-to-production-on-google-container-engine-gke",{"title":8482,"description":8483,"ogTitle":8482,"ogDescription":8483,"noIndex":6,"ogImage":8484,"ogUrl":8485,"ogSiteName":692,"ogType":693,"canonicalUrls":8485,"schema":8486},"Video tutorial: Idea to Production on Google Kubernetes Engine (GKE)","Watch the complete video tutorial to find out how you can take your team's productivity to the next level.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671572/Blog/Hero%20Images/idea-to-production-gke.jpg","https://about.gitlab.com/blog/video-tutorial-idea-to-production-on-google-container-engine-gke","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Video tutorial: Idea to Production on Google Kubernetes Engine (GKE)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean Packham\"}],\n        \"datePublished\": \"2017-01-23\",\n      }",{"title":8482,"description":8483,"authors":8488,"heroImage":8484,"date":8489,"body":8490,"category":14},[8362],"2017-01-23","\n\nWith GitLab 8.16 you can deploy GitLab straight to Google Kubernetes Engine (GKE) and go from Idea to Production in about 20 minutes, with auto-scaling CI, auto deploy, Mattermost, and a private Docker registry all on your own Kubernetes cluster. Watch the complete video tutorial to find out how you can take your team's productivity to the next level.\n\n\u003C!--more-->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/3A8mdJl_icM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nFor further instructions, please take a look at the [project](https://gitlab.com/gitlab-org/kubernetes-gitlab-demo).\n\n\u003Cp class=\"alert alert-orange\" style=\"background-color: rgba(252,163,38,.3); border-color: rgba(252,163,38,.3); color: rgb(226,67,41) !important; text-align: center;\">For more about our latest release, catch our upcoming webcast about &nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i> &nbsp;&nbsp;\u003Cstrong>GitLab 8.16\u003C/strong> &nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n&nbsp;&nbsp;on January 26. 
\u003Ca style=\"color: rgb(107,79,187);\" href=\"https://page.gitlab.com/20170126_autodeploy_autodeploywebterminal.html\">Register here\u003C/a>!\u003C/p>\n\nImage: \"[Containers](https://www.flickr.com/photos/jumilla/14403331148/in/photolist-nWLQxE-an9FYm-6eVnJP-iYjsv-iEoK39-iYjss-82XJH-9at6Z7-iYjsr-bW3NeL-9gYg6a-njSrwT-eDjxt-6AMx91-o4RkR-6rJaN3-e7BQt-66fJRR-28rg3L-GjMFdK-8LXD1-fs2WCj-4LGK2-a3NYdi-2UrVHv-2UrYz4-2Us2tK-Eeqeo-o9rBXe-hD9cR2-nS6Sax-rF6SGG-dcGZAo-3EK4k-aoHuTF-2AzAVJ-boaQy8-u8Bei-diPqb8-f3ZCWg-61fNWq-QWgZH-fJYrjR-axYxm-shkJMv-85HX8j-fKfUKQ-5aGWYj-piD2pu-7YmaKY)\" by [Jumilla](https://www.flickr.com/photos/jumilla/) is licensed under [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/).\n{: .note}\n\n\u003Cp> For information on how to replicate this demo yourself please see our \u003Ca href=\"https://gitlab.highspot.com/spots/615dd7e3911d70c4887812a7\">demo page\u003C/a>.\u003C/p>\n",{"slug":8492,"featured":6,"template":678},"video-tutorial-idea-to-production-on-google-container-engine-gke","content:en-us:blog:video-tutorial-idea-to-production-on-google-container-engine-gke.yml","Video Tutorial Idea To Production On Google Container Engine Gke","en-us/blog/video-tutorial-idea-to-production-on-google-container-engine-gke.yml","en-us/blog/video-tutorial-idea-to-production-on-google-container-engine-gke",{"_path":8498,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8499,"content":8505,"config":8509,"_id":8511,"_type":16,"title":8512,"_source":17,"_file":8513,"_stem":8514,"_extension":20},"/en-us/blog/the-importance-of-ux-personas",{"title":8500,"description":8501,"ogTitle":8500,"ogDescription":8501,"noIndex":6,"ogImage":8502,"ogUrl":8503,"ogSiteName":692,"ogType":693,"canonicalUrls":8503,"schema":8504},"Why we use personas in product development","Our User Experience (UX) Researcher explains what personas are and how they change the way teams work","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683959/Blog/Hero%20Images/the-importance-of-ux-personas.jpg","https://about.gitlab.com/blog/the-importance-of-ux-personas","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we use personas in product development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah O’Donnell\"}],\n        \"datePublished\": \"2017-01-09\",\n      }",{"title":8500,"description":8501,"authors":8506,"heroImage":8502,"date":8507,"body":8508,"category":14},[6197],"2017-01-09","\n\nWhen developing a product, it’s easy to assume that users are just like ourselves. If we rely on our assumptions to lead what and how we develop, there is a risk of creating a product that may only work for a segment of users. A more analytical approach is necessary for understanding the needs of all users, which is paramount to successful user experience (UX).\n\n\u003C!--more-->\n\nEvery month, we piece together the jigsaw of new features and design tweaks, and all the tools that ship with GitLab, ensuring that each change fits within our vision and understanding of user needs. To help with this task, we're developing personas that will allow us to relate to different types of users and subsequently predict their behavior. This is essential for our key challenge of creating a unified experience for teams big and small, so they can stay focused on their own goals. 
In this post, I'll explore some of the reasons to use personas in product development.\n\n## What is a persona?\n\nPersonas are fictional characters created to represent the major needs and expectations of the different types of users who use a website, product or service. A persona typically has a name, a picture, and a background, along with demographic information such as age, highest level of education and work experience.\n\nImagine the last time you were engrossed in a television series. It’s likely that you connected with one or more of the leading characters because you could understand - and on some level, relate to - their emotions, responses and actions in a given situation. You probably discussed the television series with a friend and together, you may have guessed at future plot lines based on the events that had occurred so far in the series. Personas work in a similar way.\n\n## What can personas reveal?\n\n A good persona generates empathy for users by putting a “human face” on data. Its aim is to summarize and share research findings with anybody contributing towards the success of the product. It’s easier for an individual (regardless of their job title) to understand a persona that collectively represents the motivations, frustrations and goals of thousands of users, than it is to trawl through days, or even months’ worth, of research findings.\n\n Personas provide a clear understanding of why and how a person is using your product, and document any pain points users experience, both in the context of using the product and the environment in which the product is used. Finally, they summarize users’ goals: what do they hope to solve or achieve by using your product? This information is normally collected by using both qualitative (surveys, user interviews, etc.) and quantitative research techniques (web analytics). While a persona itself is fictional, it is formed using factual data to provide a realistic model of user needs.\n\n## How do personas change the way teams work?\n\n Personas promote further discussions within a team about how a user would interact with a proposed idea. By internalizing the user, we adopt their mentality and form solutions based on what’s best for the user and their given situation.\n\n By defining personas, there is a well-documented, clear focus on who the product is for, which stops users’ needs from being altered to suit an idea or concept. A product manager may use personas to validate and prioritize features, whereas a designer may use them to determine the overall visual style of a product. When everybody within a team has a shared understanding of users, disagreements surrounding product development are reduced, as there is greater consensus about what is right for users.\n\n## What is the state of personas at GitLab?\n\nAt GitLab, we are in the early stages of discovering who our personas are. If you want to make sure that your needs and expectations as a user are met, then you can help us by completing our survey (survey now closed) and sharing your views.\n\nThe qualitative data from this survey will be analyzed and categorized by coding similar responses. A code is a word or short phrase that describes a respondent’s answer, and serves to condense the information collected into key themes and topics. 
We'll use statistical analysis to summarize and describe the quantitative data (close-ended questions) in the survey, and we can interpret the collective findings to form a basis for our personas.\n\nBecause survey data has some limitations, we interpret it alongside user interviews and web analytics. This helps us compare findings and make the strongest conclusions possible. We're looking forward to learning about how you use GitLab!\n\n_Tweet [@GitLab](https://twitter.com/gitlab) and check out our [job openings](/jobs/)._\n\n",{"slug":8510,"featured":6,"template":678},"the-importance-of-ux-personas","content:en-us:blog:the-importance-of-ux-personas.yml","The Importance Of Ux Personas","en-us/blog/the-importance-of-ux-personas.yml","en-us/blog/the-importance-of-ux-personas",{"_path":8516,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8517,"content":8523,"config":8528,"_id":8530,"_type":16,"title":8531,"_source":17,"_file":8532,"_stem":8533,"_extension":20},"/en-us/blog/behind-the-scenes-how-we-built-review-apps",{"title":8518,"description":8519,"ogTitle":8518,"ogDescription":8519,"noIndex":6,"ogImage":8520,"ogUrl":8521,"ogSiteName":692,"ogType":693,"canonicalUrls":8521,"schema":8522},"Behind the scenes: How we built Review Apps","GitLab's Head of Product shares an inside look at iterating on one of our latest features","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668402/Blog/Hero%20Images/code-gitlab-tanuki.png","https://about.gitlab.com/blog/behind-the-scenes-how-we-built-review-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Behind the scenes: How we built Review Apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Pundsack\"}],\n        \"datePublished\": \"2017-01-04\",\n      }",{"title":8518,"description":8519,"authors":8524,"heroImage":8520,"date":8526,"body":8527,"category":14},[8525],"Mark Pundsack","2017-01-04","\n\nA bunch of us on the GitLab team have known for a while just how important review apps are. Even though this wasn’t something that a lot of customers asked for, we knew we had to tackle it because of how we'd seen it transform a developer's flow. We also knew that tightly integrating it into GitLab would make it even better. Although our aspirations for the feature started out gigantic and magical, we ultimately constrained them to the practical and concrete. Here's a behind-the-scenes look at how we iterated and shipped [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) over the last 3 releases.\n\n\u003C!-- more -->\n\nFull disclosure: I used to work at Heroku on the team that shipped [Heroku Review Apps](https://devcenter.heroku.com/articles/github-integration-review-apps), and some of that work was inspired by a tool called [Fourchette](https://github.com/rainforestapp/fourchette), which was created by the great folks at [Rainforest QA](https://www.rainforestqa.com/). Even outside of my personal bias, our CEO, CI Lead and others had seen things like this elsewhere and saw how transformative it could be.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/CteZol_7pxo?start=1713\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nThere are a ton of different ways we could have shipped it. We started months ago, mostly discussing asynchronously on GitLab issues, with big ideas that made Review Apps seem kind of daunting. 
We had ideas for black magic to detect Kubernetes settings, configure all the review app stuff for you, make them work only for merge requests rather than for every branch, etc. It felt like something that might not ship for months, if not years, because of all the complexity and dependencies.\n\nBut then a few of us got together to see how we could simplify, starting with a written proposal, then collaborating in a Google Doc, then a live chat over Google Hangouts, and we came up with what we felt would be the smallest thing we could do to enable the functionality. We shared that proposal back on the public issue. After a couple of days, we pushed it even further and really cut the scope.\n\nHere are a few links to some of the issues we went through, starting with a large meta issue, down to a concrete proposal and then counterproposal, until finally, the winning proposal emerged:\n\n* [#3286](https://gitlab.com/gitlab-org/gitlab-ce/issues/3286) - [Epic] GitLab Deploy, _opened 1 year ago_\n* [#14698](https://gitlab.com/gitlab-org/gitlab-ce/issues/14698) - Container scheduler for 4 use cases, _9 months ago_\n* [#20255](https://gitlab.com/gitlab-org/gitlab-ce/issues/20255) - [Meta] Review Apps, _5 months ago_\n* [#20054](https://gitlab.com/gitlab-org/gitlab-ce/issues/20054) - Review Apps as Runner job, _5 months ago_\n* [#21411](https://gitlab.com/gitlab-org/gitlab-ce/issues/21411) - How do we do deploys, _4 months ago_\n* [#21971](https://gitlab.com/gitlab-org/gitlab-ce/issues/21971) - Dynamic environments aka Review Apps, _3 months ago_\n\n## 8.12\n\nWe initially offered experimental support for Review Apps in GitLab 8.12. At that point, we had reduced it to just one or two seemingly small changes to the `.gitlab-ci.yml` format. Specifically, we let you specify the URL of an environment in `.gitlab-ci.yml` (rather than just in the web UI), and we let you use variables within the environment name and URL. Trivial, right? One extra keyword and another small change enabled environments to be “dynamic,” which is the core of Review Apps.\n\n```yaml\nreview_apps:\n  environment:\n    name: review/$CI_BUILD_REF_NAME\n    url: http://$CI_BUILD_REF_NAME.review.gitlab.com/\n```\n\n## 8.13\n\nThen in 8.13 we implemented another key piece: the ability to delete or stop apps. Again, there were all sorts of complex ideas for how to solve this, but we settled on the smallest change possible that enabled the feature. In this case, that was reusing our existing concept of manual actions, or jobs that run in a pipeline only when a user triggers them manually from the web UI. So we said, if you can script how to delete your app, just create a manual action job for it. Then we added a new keyword in `.gitlab-ci.yml` so you could identify which of these jobs stopped the environment, and we displayed a different UI for that - now you get a little square stop button instead of the triangle play button. Again, pretty trivial.\n\n```yaml\nreview:\n  environment:\n    name: review/$app\n    on_stop: stop_review\n\nstop_review:\n  script: echo Delete My App\n  when: manual\n  environment:\n    name: review/$app\n    action: stop\n```\n\n## 8.14\n\nMost recently, in the 8.14 release, we made GitLab detect when a branch is deleted and run that manual action automatically for you. We also realized that with tons of Review Apps, your environments list might get unmanageable. To mitigate this, we came up with the convention that if you named your review app starting with a common name and then a slash, we’d treat that like a folder by which to group your apps, so the interface can show a bunch of Review Apps behind a collapsed folder. Once again, these are relatively small changes.\n\n* Auto-stop on branch delete\n* Folders in environment list
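\n\nPutting the three releases together, here's a minimal sketch of what a single `.gitlab-ci.yml` using all of these pieces might look like. The script names and the URL are illustrative placeholders, not from the original examples:\n\n```yaml\nreview:\n  script: ./deploy-review-app.sh            # hypothetical deploy script\n  environment:\n    name: review/$CI_BUILD_REF_NAME         # the \"review/\" prefix groups apps into a folder (8.14)\n    url: http://$CI_BUILD_REF_NAME.review.example.com/\n    on_stop: stop_review                    # points at the job that stops this environment (8.13)\n\nstop_review:\n  script: ./teardown-review-app.sh          # hypothetical teardown script\n  when: manual                              # manual action; run automatically on branch delete (8.14)\n  environment:\n    name: review/$CI_BUILD_REF_NAME\n    action: stop\n```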
\n\n## Wrap up\n\nUltimately this really complex, life-changing feature was broken down into 3 releases of the minimum viable change.\n\nWhile we say Review Apps is now complete, it’s not finished. In fact, we have a saying that nothing is ever finished because we’re always looking for the minimal change, and then iterating. By shipping smaller pieces, we not only deliver faster, but we learn from what’s been shipped, and then iterate smarter.\n\nWe’ve now got follow-on issues to look at simplifying the `.gitlab-ci.yml` syntax for Review Apps, and even adding back some of that magic we originally envisioned. We’ll continue to iterate, and your feedback is key to us shipping better.\n\n* [#25138](https://gitlab.com/gitlab-org/gitlab-ce/issues/25138) - Simplify `.gitlab-ci.yml` syntax for stopping Review Apps\n* [#24197](https://gitlab.com/gitlab-org/gitlab-ce/issues/24197) - Smart deploy\n* [#23580](https://gitlab.com/gitlab-org/gitlab-ce/issues/23580) - Auto deploy\n\n_Tweet us [@GitLab](https://twitter.com/gitlab), check out our [job openings](/jobs/), or add your questions and suggestions to our [issue tracker](https://gitlab.com/gitlab-org/gitlab-ce/issues)!_\n",{"slug":8529,"featured":6,"template":678},"behind-the-scenes-how-we-built-review-apps","content:en-us:blog:behind-the-scenes-how-we-built-review-apps.yml","Behind The Scenes How We Built Review Apps","en-us/blog/behind-the-scenes-how-we-built-review-apps.yml","en-us/blog/behind-the-scenes-how-we-built-review-apps",{"_path":8535,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8536,"content":8542,"config":8546,"_id":8548,"_type":16,"title":8549,"_source":17,"_file":8550,"_stem":8551,"_extension":20},"/en-us/blog/a-creative-agencys-gitlab-wishlist",{"title":8537,"description":8538,"ogTitle":8537,"ogDescription":8538,"noIndex":6,"ogImage":8539,"ogUrl":8540,"ogSiteName":692,"ogType":693,"canonicalUrls":8540,"schema":8541},"Customer story: A creative agency's GitLab wishlist","A Lukkien developer shares his team's challenges with Git and GitLab for their UX designs, and requests a few tweaks they'd find useful.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670681/Blog/Hero%20Images/a-creative-agencys-gitlab-wishlist.jpg","https://about.gitlab.com/blog/a-creative-agencys-gitlab-wishlist","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Customer story: A creative agency's GitLab wishlist\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2016-12-15\",\n      }",{"title":8537,"description":8538,"authors":8543,"heroImage":8539,"date":8544,"body":8545,"category":14},[6728],"2016-12-15","\n[Wouter van Kuipers](https://twitter.com/wvkuipers) is an engineer at [Lukkien](https://www.lukkien.com/), a creative agency that produces online media, photography, film, apps, CGI, and graphic design. His team currently works on a platform aimed at parents and healthcare professionals. They've used a combination of Jenkins and GitLab, although they are switching to GitLab CI for testing. 
He told me his team tends to use the collaboration tools of GitLab the most. Before GitLab, they used SVN, and ultimately decided on GitLab instead of a competitor because they needed to host on-premises for security reasons. Our service engineer [Lee Matos](https://twitter.com/leematos) sat down with Wouter to learn how GitLab can help.\n\n\u003C!--more-->\n\nHere are some items discussed below and requested by the Lukkien team:\n\n* A view that will let you see changes over builds, and how builds are affected over time.  \n* Notifications around CI builds, so if there are any related tickets, those get updated as well.\n* Versioning for Photoshop and InDesign files.\n\n**Wouter:** We struggle as a team and company to find a good versioning system for our (UX) designs. Right now we create separate folders and label the versions of our InDesign and Photoshop files. We want to know the latest version, but also want to have a clear visual representation of the changes between versions. Is there any planning for tooling like that in GitLab in the (near) future?\n{: .alert .alert-info}\n\n**Lee:** Frankly, this is on our dream feature list. I think everybody on our team wants to be able to version Photoshop and InDesign documents, but we don’t have a good solution for those files right now that's going to work smoothly. It looks like Adobe is getting into the versioning space for files like these, so there’s a silver lining here in that once Adobe solves that problem, we’ll probably do something similar quickly thereafter.\n\n**Wouter:** My next question is that we are hosting GitLab using a Docker setup. This works quite well, but we are not sure if it will create limitations in the long run, for example if we want to use Mattermost in the future. My team has 8-10 developers, but we use the setup for all our teams; we’re all in the same GitLab instance. So that’s 100-120 developers.\n{: .alert .alert-info}\n\n**Lee:** That's what I would call GitLab Small/Medium-sized. As it stands, we think that will be fine; there are no limitations even with Mattermost, and we don’t expect there to be any problems. Obviously if there are, we’ll explore that with you and figure it out. Our product team leads aren't aware of anyone running GitLab in Docker at a big scale (1000+) – most of those clients are running it directly in a VM or on bare metal. We feel that obviously Docker is the future, so we need to find the answer to these questions. If you run into anything, please bring it up in an issue. And the same goes for Docker in Mattermost: we don’t expect anything different.\n\n**Wouter:** Finally, in what way does GitLab want to position itself in the long run compared to GitHub?\n{: .alert .alert-info}\n\n**Lee:** The best way to think about it, for me, is we are actually more like Atlassian. Our end goal is to build what Atlassian ended up with by acquiring the little pieces, by instead building those parts and making them 100 percent cohesive. So it’s more about building an end-to-end development tool that allows your team to work together and converse and go. Our buzz phrase at this point is \"From idea to production\", so we want to cover everything over that process, and make it faster, whatever you’re using GitLab for. 
That’s even our goal internally as well, so it excites me because we’re actually using GitLab to build it.\n\nI think GitHub is positioning itself as more of a core component, they see Git and code as the core thing that needs to be solved, and are leaving integrations up to the third parties. We have integrations and we see the value in them, but we want to build something that allows you to start making things work out of the box. Instead of saying \"You need to go buy Drone CI, you need to use Waffle.io, and need to wire them all up and read 10 different documentations to figure it out.\" We want that process to be as easy as possible.\n\nImage [\"Lightroom Preset Balloon Release\"](https://www.flickr.com/photos/lennykphotography/26687024535/in/photolist-GEeYCF-hUAGKL-nQCwXy-Emhqdz-HRUzeG-EeGxU4-p2KCQa-Eroe6z-e4BpVm-dcZWfj-mQnNTJ-atd2f5-DSYEyA-DSqqGk-DFXwUA-aHPQVk-GucZJZ-EDGjje-CS8FYi-rymZ62-EBjtSY-DSfzQT-avJQMx-aYtqkR-CztMC7-dTRM3q-EPK3hD-DpeasQ-f2hdPB-eRwBGC-EoaxPD-b18F74-9sd1No-bkNuRx-byvPzZ-hxRZyb-D7F1xM-EVqmsh-CVBJBa-9pnw9W-eBWbNx-ftZrun-DXtJuT-p8As5e-DWQhdR-bkNdg7-oQCcaJ-b3JagT-8VoF1U-cgzLCU) by [Lenny K Photography](https://www.flickr.com/photos/lennykphotography/) is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/legalcode).\n\n_We want you to ask us anything! If you're a user interested in sharing your story on our blog, please fill out this [form]( https://docs.google.com/a/gitlab.com/forms/d/1K8ZTS1QvSSPos6mVh1ol8ZyagInYctX3fb9eglzeK70/edit)  and we’ll get in touch!_\n\n_Tweet us [@GitLab](https://twitter.com/gitlab) and check out our [job openings](/jobs/)._\n",{"slug":8547,"featured":6,"template":678},"a-creative-agencys-gitlab-wishlist","content:en-us:blog:a-creative-agencys-gitlab-wishlist.yml","A Creative Agencys Gitlab Wishlist","en-us/blog/a-creative-agencys-gitlab-wishlist.yml","en-us/blog/a-creative-agencys-gitlab-wishlist",{"_path":8553,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8554,"content":8560,"config":8566,"_id":8568,"_type":16,"title":8569,"_source":17,"_file":8570,"_stem":8571,"_extension":20},"/en-us/blog/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes",{"title":8555,"description":8556,"ogTitle":8555,"ogDescription":8556,"noIndex":6,"ogImage":8557,"ogUrl":8558,"ogSiteName":692,"ogType":693,"canonicalUrls":8558,"schema":8559},"Spring Boot delivery with GitLab CI and Kubernetes","Create a Continuous Delivery pipeline to deploy a Spring Boot app with GitLab CI and Kubernetes to Google Cloud Container Engine","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672314/Blog/Hero%20Images/dew-leaf.jpg","https://about.gitlab.com/blog/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Continuous delivery of a Spring Boot application with GitLab CI and Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marco Lenzo\"}],\n        \"datePublished\": \"2016-12-14\",\n      }",{"title":8561,"description":8556,"authors":8562,"heroImage":8557,"date":8564,"body":8565,"category":14},"Continuous delivery of a Spring Boot application with GitLab CI and Kubernetes",[8563],"Marco Lenzo","2016-12-14","\n\n[Continuous integration, continuous deployment and continuous delivery](/topics/ci-cd/) are increasingly popular topics among modern development teams. 
Together they enable a team to build, test and deploy the source code at any commit. The main benefit of these approaches is the ability to release higher-quality code more frequently through automated pipelines. The tough part is building such pipelines. There is a myriad of tools available which we would need to choose, learn, install, integrate, and maintain.\n\nRecently, I fell in love with [GitLab](https://gitlab.com/)! It offers a fully featured ecosystem of tools which enable us to create an automated pipeline in minutes! From source control to issue tracking and CI, we find everything under one roof, fully integrated and ready to use.\n\n\u003C!-- more -->\n\nIn this tutorial, we will create a [Spring Boot](https://projects.spring.io/spring-boot/) application built, tested, and deployed with [GitLab CI](/solutions/continuous-integration/) on a [Kubernetes](http://kubernetes.io/) cluster.\n\n## What are Spring Boot and Kubernetes?\n\nSpring Boot (sometimes called Java Spring Boot) is the leading [microservice chassis](http://microservices.io/patterns/microservice-chassis.html) for Java. It allows a developer to build a production-grade stand-alone application, like a typical [CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) application exposing a [RESTful API](https://en.wikipedia.org/wiki/Representational_state_transfer), with minimal configuration, drastically reducing the learning curve required for using the [Spring Framework](https://spring.io/).\n\nKubernetes is an open-source container orchestrator inspired by [Google Borg](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43438.pdf) that schedules, scales and manages containerized applications.\n\n\n## Create a GitLab project\n\nLet's start by [creating a new project](https://gitlab.com/projects/new) in GitLab named `actuator-sample`. Then we follow the command line instructions displayed in the project's home page to clone the repository on our machine and perform the first commit.\n\n```shell\ngit clone git@gitlab.com:marcolenzo/actuator-sample.git\ncd actuator-sample\ntouch README.md\ngit add README.md\ngit commit -m \"add README\"\ngit push -u origin master\n```\n\nAlways replace `marcolenzo` with your own GitLab username whenever copying a snippet of code from this tutorial.\n{: .alert .alert-info}\n\n## Create a Spring Boot application\n\nTo bootstrap the Spring Boot application we navigate to the [Spring Initializr](https://start.spring.io) web page and generate a **Maven Project** with the pre-selected Spring Boot **Version**. [Maven](https://maven.apache.org/index.html) is a project management tool commonly used in Java projects to define dependencies and the build lifecycle. We leave `com.example` as **Group** and set `actuator-sample` as the **Artifact** name. 
We can now unzip the archive and launch the application immediately. Spring Initializr has already created everything for us. We just need to have a [Java JDK](http://openjdk.java.net/install/) 1.7 or later installed on our machine and the `JAVA_HOME` environment variable set accordingly. [OpenJDK](http://openjdk.java.net/) is the preferred option for most Linux distributions since it is readily available in their repositories. You can alternatively install the [Oracle JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html) if it is a strict requirement for your team.\n\n\n```shell\n### Installing OpenJDK 8 on Debian, Ubuntu, etc.\n\nsudo apt-get install openjdk-8-jre\n\n### Installing OpenJDK 8 on Fedora, Oracle Linux, Red Hat Enterprise, CentOS, etc.\n\nsu -c \"yum install java-1.8.0-openjdk\"\n\n### Setting the JAVA_HOME environment variable\n\nexport JAVA_HOME=/path/to/your/java/home # e.g. /usr/lib/jvm/java-8-openjdk-amd64/\n\n### Extracting and launching the application\n\n~/git/actuator-sample$ unzip ~/Downloads/actuator-sample.zip -d ../\n~/git/actuator-sample$ ./mvnw spring-boot:run\n\n[...]\n\n2016-12-02 22:41:14.376  INFO 10882 --- [           main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat started on port(s): 8080 (http)\n2016-12-02 22:41:14.420  INFO 10882 --- [           main] com.example.ActuatorSampleApplication    : Started ActuatorSampleApplication in 17.924 seconds (JVM running for 87.495)\n```\n\nThe application is up and running, and we did not write a single line of code! Spring Boot is opinionated and auto-configures the application with sane default values and beans. It also scans the classpath for known dependencies and initializes them. In our case, we immediately enjoy all the production-grade services offered by [Spring Actuator](http://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-endpoints.html).\n\n```shell\n~$ curl http://localhost:8080/health\n{\"status\":\"UP\",\"diskSpace\":{\"status\":\"UP\",\"total\":981190307840,\"free\":744776503296,\"threshold\":10485760}}\n```\n\nIf you wish to learn Spring Boot in greater detail, have a look at their [reference documentation](http://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/) and [guides](https://spring.io/guides).\n{: .alert .alert-info}\n\nIt is time to commit our changes and push them to `origin`. To simplify things a bit, we commit directly on `master` without using [feature branches](https://docs.gitlab.com/ee/topics/gitlab_flow.html#github-flow-as-a-simpler-alternative) since collaboration is not the focus of this tutorial. 
Later, we will use [environment branches](https://docs.gitlab.com/ee/topics/gitlab_flow.html#environment-branches-with-gitlab-flow) as specified in the [GitLab Flow](https://docs.gitlab.com/ee/topics/gitlab_flow.html) to deploy to different environments selectively, e.g. staging and production. If you are not familiar with the [GitLab Flow](/solutions/gitlab-flow/), I strongly recommend reading its documentation.\n\n```shell\ngit add --all\ngit commit -m \"Creates actuator-sample application\"\ngit push origin master\n```\n\n## Creating a continuous delivery pipeline with GitLab CI\n\nWhile our code is now safe on GitLab, we still need to automate its integration and deployment. We need to verify each commit with an automated build and set of tests in order to discover issues as early as possible and, if the build is successful, deploy to a target environment. A few years ago, our only option was to install, configure and maintain a CI Server like [Jenkins](https://jenkins.io/) and possibly automate our deployment with a set of bash scripts. While the number of options has grown significantly, whether self-hosted or in the cloud, we still need to find a way to integrate our source control system with the CI Server of our choice.\n\nNot anymore though! GitLab has [fully integrated CI and CD Pipelines](/topics/ci-cd/) in its offering, allowing us to [build, test and deploy](/topics/version-control/what-is-gitlab-flow/) our code with ease.\n\nFor the purpose of this tutorial we will deploy to the [Google Cloud Container Engine](https://cloud.google.com/container-engine/) which is a cluster management and orchestration system built on the open source [Kubernetes](http://kubernetes.io/). Kubernetes is supported by all main cloud providers and can be [easily installed on any Linux server](http://kubernetes.io/docs/getting-started-guides/kubeadm/) in minutes. That said, we will be able to re-use this configuration on virtually any environment running Kubernetes.\n\nBefore we can proceed to the creation of the pipeline, we need to add a couple of files to our repository to package our application as a Docker container and to describe the target deployment in Kubernetes terms.\n\n### Packaging a Spring Boot application as a Docker container\n\nLet's start by creating the `Dockerfile` in the root directory of our project.\n\n```shell\nFROM openjdk:8u111-jdk-alpine\nVOLUME /tmp\nADD /target/actuator-sample-0.0.1-SNAPSHOT.jar app.jar\nENTRYPOINT [\"java\",\"-Djava.security.egd=file:/dev/./urandom\",\"-jar\",\"/app.jar\"]\n```\n\nThe `FROM` keyword defines the base Docker image of our container. We chose [OpenJDK](http://openjdk.java.net/) installed on [Alpine Linux](https://alpinelinux.org/), which is a lightweight Linux distribution. The `VOLUME` instruction creates a mount point with the specified name and marks it as holding externally mounted volumes from the native host or other containers. `ADD` copies the executable JAR generated during the build to the container root directory. Finally, `ENTRYPOINT` defines the command to execute when the container is started. Since Spring Boot produces an executable JAR with embedded Tomcat, the command to execute is simply `java -jar app.jar`. The additional `java.security.egd=file:/dev/./urandom` property is used to speed up the application start-up and avoid possible freezes: by default, Java uses `/dev/random` to seed its `SecureRandom` class, which is known to block if its entropy pool is empty.\n\n
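Before wiring this into a pipeline, we can sanity-check the image locally. A quick sketch, assuming Docker is installed and the Maven build has been run first:\n\n```shell\n./mvnw package                                   # produces target/actuator-sample-0.0.1-SNAPSHOT.jar\ndocker build -t actuator-sample .                # builds the image from the Dockerfile above\ndocker run --rm -d -p 8080:8080 actuator-sample  # starts the container in the background\ncurl http://localhost:8080/health                # should report {\"status\":\"UP\",...}\n```\n\n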
Time to commit.\n\n```shell\ngit add Dockerfile\ngit commit -m \"Adds Dockerfile\"\ngit push origin master\n```\n\n### Define the Kubernetes deployment\n\nLet's create a file named `deployment.yml` in the root directory of our project.\n\n```yml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n  name: actuator-sample\nspec:\n  replicas: 2\n  template:\n    metadata:\n      labels:\n        app: actuator-sample\n    spec:\n      containers:\n      - name: actuator-sample\n        image: registry.gitlab.com/marcolenzo/actuator-sample\n        imagePullPolicy: Always\n        ports:\n        - containerPort: 8080\n      imagePullSecrets:\n        - name: registry.gitlab.com\n```\n\nThis is the definition of a Kubernetes [`Deployment`](http://kubernetes.io/docs/user-guide/deployments/) named `actuator-sample`. The `replicas` element defines the target number of [`Pods`](http://kubernetes.io/docs/user-guide/pods/). Kubernetes performs automated bin packing and self-healing of the system to comply with the deployment specifications while achieving optimal utilization of compute resources. A Pod can be composed of multiple containers. In this scenario, we only include the `actuator-sample` image stored on our private [GitLab Container Registry](/blog/gitlab-container-registry/). For this reason, we need to set an entry under `imagePullSecrets`, which is used to authenticate to the GitLab Container Registry.\n\nFor a detailed explanation of Kubernetes resources and concepts refer to the [official documentation](http://kubernetes.io/).\n{: .alert .alert-info}\n\nTime to commit again and we are ready to define our GitLab CI pipeline.\n\n```shell\ngit add deployment.yml\ngit commit -m \"Adds Kubernetes Deployment definition\"\ngit push origin master\n```\n\n
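Note that the `Deployment` only schedules the Pods; exposing them to traffic is the job of a Kubernetes [`Service`](http://kubernetes.io/docs/user-guide/services/), which this tutorial leaves out of scope. As a sketch, a minimal `Service` matching the Deployment above could look like this (the `LoadBalancer` type assumes a cloud provider, such as Google Cloud, that can provision one):\n\n```yml\napiVersion: v1\nkind: Service\nmetadata:\n  name: actuator-sample\nspec:\n  type: LoadBalancer      # provisions an external load balancer on supported clouds\n  selector:\n    app: actuator-sample  # matches the Pod label set in deployment.yml\n  ports:\n  - port: 80              # externally visible port\n    targetPort: 8080      # the containerPort defined in deployment.yml\n```\n\n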
### Creating the GitLab CI pipeline\n\nIn order to make use of [GitLab CI](/solutions/continuous-integration/) we need to add the [`.gitlab-ci.yml`](https://docs.gitlab.com/ee/ci/yaml/) configuration file to the root directory of our repository. This file is used by [GitLab Runners](https://docs.gitlab.com/ee/ci/runners/) to manage our project's builds and deployments. Therein we can define an unlimited number of [Jobs](https://docs.gitlab.com/ee/ci/jobs/) and their role in the whole build lifecycle.\n\n```yml\nimage: docker:latest\nservices:\n  - docker:dind\n\nvariables:\n  DOCKER_DRIVER: overlay\n  SPRING_PROFILES_ACTIVE: gitlab-ci\n\nstages:\n  - build\n  - package\n  - deploy\n\nmaven-build:\n  image: maven:3-jdk-8\n  stage: build\n  script: \"mvn package -B\"\n  artifacts:\n    paths:\n      - target/*.jar\n\ndocker-build:\n  stage: package\n  script:\n  - docker build -t registry.gitlab.com/marcolenzo/actuator-sample .\n  - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN registry.gitlab.com\n  - docker push registry.gitlab.com/marcolenzo/actuator-sample\n\nk8s-deploy:\n  image: google/cloud-sdk\n  stage: deploy\n  script:\n  - echo \"$GOOGLE_KEY\" > key.json\n  - gcloud auth activate-service-account --key-file key.json\n  - gcloud config set compute/zone europe-west1-c\n  - gcloud config set project actuator-sample\n  - gcloud config set container/use_client_certificate True\n  - gcloud container clusters get-credentials actuator-sample\n  - kubectl delete secret registry.gitlab.com\n  - kubectl create secret docker-registry registry.gitlab.com --docker-server=https://registry.gitlab.com --docker-username=marcolenzo --docker-password=$REGISTRY_PASSWD --docker-email=lenzo.marco@gmail.com\n  - kubectl apply -f deployment.yml\n```\n\nLet's break the file into pieces to understand what is going on.\n\n#### Image and Services\n\n```yml\nimage: docker:latest\nservices:\n  - docker:dind\n```\n\nThe [GitLab Runner](https://docs.gitlab.com/ee/ci/runners/) can [use Docker images](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html) to support our pipelines. The [`image` element](https://docs.gitlab.com/ee/ci/yaml/#image) defines the name of the Docker image we want to use. Valid images are those hosted in the local Docker Engine or on [Docker Hub](https://hub.docker.com/). The `services` element defines additional Docker images which are linked to the main container. In our case the main container is a plain Docker image while the linked container is enabled for running Docker in Docker.\n\n#### Variables\n\n```yml\nvariables:\n  DOCKER_DRIVER: overlay\n  SPRING_PROFILES_ACTIVE: gitlab-ci\n```\n\nThis is the definition of [`variables`](https://docs.gitlab.com/ee/ci/yaml/#variables) to be set on our build environment. `DOCKER_DRIVER` tells the Docker Engine which storage driver to use. We use `overlay` for [performance reasons](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#using-the-overlayfs-driver). `SPRING_PROFILES_ACTIVE` is very useful when dealing with Spring Boot applications. It activates [Spring Profiles](http://docs.spring.io/autorepo/docs/spring-boot/current/reference/html/boot-features-profiles.html), which provide a way to segregate parts of our application configuration and make it available only in certain environments. For instance, we can define different database URIs per environment, e.g. `localhost` when running on the developer machine and `mongo` when running within GitLab CI.\n\n
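For example, with the variable above in place, a hypothetical `src/main/resources/application-gitlab-ci.yml` would be loaded only inside the pipeline (the property and hostname below are purely illustrative; this tutorial's application doesn't use a database):\n\n```yml\n# application-gitlab-ci.yml -- read only when SPRING_PROFILES_ACTIVE=gitlab-ci\nspring:\n  data:\n    mongodb:\n      uri: mongodb://mongo:27017/actuator-sample # 'mongo' being a linked CI service\n```\n\n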
#### Stages\n\n```yml\nstages:\n  - build\n  - package\n  - deploy\n```\n\nThe [`stages` element](https://docs.gitlab.com/ee/ci/yaml/#stages) defines the lifecycle of our build. We associate each [job](https://docs.gitlab.com/ee/ci/jobs/) with one stage. All jobs within a stage are run in parallel and stages are triggered sequentially in the order we define them, i.e. the next stage is initiated only when the previous one is complete.\n\n#### The `maven-build` job\n\n```yml\nmaven-build:\n  image: maven:3-jdk-8\n  stage: build\n  script: \"mvn package -B\"\n  artifacts:\n    paths:\n      - target/*.jar\n```\n\nThis is a job definition. Jobs can have any name except reserved keywords. Have a look at the `.gitlab-ci.yml` [documentation](https://docs.gitlab.com/ee/ci/yaml/) for the complete list of keywords.\n\nThe scope of this job is to perform a [Maven](https://maven.apache.org/index.html) build. For this reason, we define `maven:3-jdk-8` as the Docker image on which this job should execute. This image comes with Maven 3 and the Java JDK 8 pre-installed for us.\n\nWe then specify `build` as the `stage` of this job. Jobs associated with the same stage run concurrently. This is extremely useful if you need to build your application against multiple platforms. For instance, if we wanted to compile and test our application on Java JDK 7 as well, we could simply create another job with a different name and use the image `maven:3-jdk-7`.\n\n```yml\nmaven-test-jdk-7:\n  image: maven:3-jdk-7\n  stage: build\n  script: \"mvn package -B\"\n  artifacts:\n    paths:\n      - target/*.jar\n```\n\nAs previously said, the `maven-test-jdk-7` job runs in parallel with `maven-build`. Hence, it does not have an impact on the pipeline execution time.\n\nThe [`script`](https://docs.gitlab.com/ee/ci/yaml/#script) is a shell command to be executed by the GitLab Runner. The `mvn package -B` command triggers a non-interactive Maven build up to the `package` phase. This phase is specific to the [Maven build lifecycle](https://maven.apache.org/guides/introduction/introduction-to-the-lifecycle.html) and it also includes the `validate`, `compile` and `test` phases. That means that our Maven project will be validated, compiled and (unit) tested as well. Tests are to be included in the `src/test/java` folder. In our specific case, Spring Initializr has already created a unit test which verifies that the application context loads without errors. We are free to add as many unit tests as we like. Finally, the `package` phase creates the executable JAR.\n\nTo persist the executable JAR and share it across jobs, we specify job [`artifacts`](https://docs.gitlab.com/ee/ci/yaml/#artifacts). These are files or directories that are attached to the build after success and made downloadable from the UI in the Pipelines screen.\n\n![Downloading artifacts from pipelines](https://about.gitlab.com/images/blogimages/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes/artifacts.png){: .shadow}\n\n\n#### The `docker-build` job\n\n```yml\ndocker-build:\n  stage: package\n  script:\n  - docker build -t registry.gitlab.com/marcolenzo/actuator-sample .\n  - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN registry.gitlab.com\n  - docker push registry.gitlab.com/marcolenzo/actuator-sample\n```\n\nThe `docker-build` job packages the application into a Docker container. We define `package` as the build `stage` since we need the `maven-build` job to produce the executable JAR beforehand.\n\nThe scripts are a typical sequence of `docker` commands used to build an image, log in to a private registry and push the image to it. 
We will be pushing images to the [GitLab Container Registry](/blog/gitlab-container-registry/).\n\nThe [`$CI_BUILD_TOKEN`](https://docs.gitlab.com/ee/user/project/new_ci_build_permissions_model.html#container-registry) is a pre-defined variable which is injected by GitLab CI into our build environment automatically. It is used to log in to the GitLab Container Registry.\n\nFor a complete list of pre-defined variables, have a look at the [variables documentation](https://docs.gitlab.com/ee/ci/variables/).\n{: .alert .alert-info}\n\n#### The `k8s-deploy` job\n\nThis job is responsible for deploying our application to the [Google Kubernetes Engine](https://cloud.google.com/container-engine/). I purposely decided to make use of the [Google Cloud SDK](https://cloud.google.com/sdk/gcloud/) (`gcloud`) because it lets us programmatically create and manage Google Container Engine clusters and other products of the Google Cloud ecosystem. In this tutorial, we will simplify things a bit by creating the Google Container Engine cluster beforehand through the GUI.\n\nFirst, we create a Google Cloud Project named `actuator-sample`. Take note of the `Project ID` since it sometimes differs from the project name we specify. Then we create a Google Kubernetes Engine cluster named `actuator-sample` as well. We can choose any machine type and any number of nodes. For the purpose of this tutorial one node and a small machine are sufficient. Let's take note of the `zone`.\n\n![Create a container cluster](https://about.gitlab.com/images/blogimages/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes/create-gce-cluster.png){: .shadow}\n\nFinally, we need to create a service account, which is necessary to perform a non-interactive login with `gcloud`. Navigate to Google Cloud **API Manager** > **Credentials** > **Create Credentials** and create a JSON key for the `Compute Engine default service account`.\n\nWe can now analyze the configuration.\n\n```yml\nk8s-deploy:\n  image: google/cloud-sdk\n  stage: deploy\n  script:\n  - echo \"$GOOGLE_KEY\" > key.json # Google Cloud service account key\n  - gcloud auth activate-service-account --key-file key.json\n  - gcloud config set compute/zone europe-west1-c\n  - gcloud config set project actuator-sample\n  - gcloud config set container/use_client_certificate True\n  - gcloud container clusters get-credentials actuator-sample\n  - kubectl delete secret registry.gitlab.com\n  - kubectl create secret docker-registry registry.gitlab.com --docker-server=https://registry.gitlab.com --docker-username=marcolenzo --docker-password=$REGISTRY_PASSWD --docker-email=lenzo.marco@gmail.com\n  - kubectl apply -f deployment.yml\n```\n\nWe use the `google/cloud-sdk` image for this process since it comes preloaded with `gcloud` and all components and dependencies of the Google Cloud SDK including alpha and beta components. We obviously chose `deploy` as the `stage` since we want our application to be packaged beforehand and its container pushed to the GitLab Container Registry. Then we execute a set of scripts.\n\nThe `echo \"$GOOGLE_KEY\" > key.json` script injects the Google Cloud service account key in the container. `$GOOGLE_KEY` is a Secure Variable having the content of the Google Cloud service account key as its value. [Secure Variables](https://docs.gitlab.com/ee/ci/variables/#user-defined-variables-secure-variables) are user-defined variables that should not be shown in the `.gitlab-ci.yml` file. They are set per project by navigating to **Project** > **Variables** > **Add Variable** in GitLab.\n\n![Secure Variables](https://about.gitlab.com/images/blogimages/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes/secure-variables.png){: .shadow}\n\n
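The value we paste there is the JSON key downloaded earlier. As an aside, with a recent Google Cloud SDK the key can also be created non-interactively; a sketch, where the service account email is a placeholder you would look up with `gcloud iam service-accounts list`:\n\n```shell\n# Creates and downloads a new key for the given service account (hypothetical email)\ngcloud iam service-accounts keys create key.json --iam-account 123456789-compute@developer.gserviceaccount.com\n```\n\n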
The `gcloud auth activate-service-account --key-file key.json` script performs the non-interactive authentication process. The `gcloud config set ...` scripts select the target project, zone and cluster. Make sure these values correspond to those you jotted down before. The `gcloud container clusters get-credentials actuator-sample` script downloads the `kubectl` configuration file. If we wanted to use Kubernetes on another cloud provider or custom installation, we would source the `kubectl` configuration `~/.kube/config` without the need to interact with `gcloud`.\n\nThe `kubectl create secret docker-registry ...` script creates the `imagePullSecret` we had defined in the `deployment.yml`. This is used by Kubernetes to authenticate with our private GitLab Container Registry and download the container images. The `kubectl delete secret` step is necessary because the Kubernetes API lacks a `replace` operation for `docker-registry` secrets. In a real-world scenario, I would suggest handling [Kubernetes secrets](http://kubernetes.io/docs/user-guide/secrets/) that can affect multiple pipelines (such as a password for a private Docker registry) in a separate pipeline or through configuration management tools like [Ansible](https://www.ansible.com/), [Salt](https://saltstack.com/), [Puppet](https://puppet.com/) or [Chef](https://www.chef.io/). The reason is that such secrets should be rotated periodically for security reasons and updated in each GitLab project using them. There is also the risk of interference between pipelines because of the `kubectl delete` command. Note that `$REGISTRY_PASSWD` is another Secure Variable.\n\nTime to check if everything is in order on our cluster.\n\n```shell\n$ kubectl get deployments\nNAME              DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE\nactuator-sample   2         2         2            2           2m\n$ kubectl get pods\nNAME                               READY     STATUS    RESTARTS   AGE\nactuator-sample-3641958612-3e5xy   1/1       Running   0          2m\nactuator-sample-5542343546-fr4gh   1/1       Running   0          2m\n```\n\n![Kubernetes](https://about.gitlab.com/images/blogimages/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes/kubernetes.png){: .shadow}\n\nDeployed!\n\n#### GitLab Environments\n\nBefore concluding the tutorial, we will learn about [GitLab Environments](https://docs.gitlab.com/ee/ci/environments/index.html), which enable us to track environments and deployments.\n\nLet's refactor the `k8s-deploy` job and split it in two. 
One job will target the staging environment and the other the production environment.\n\n```yml\nk8s-deploy-staging:\n  image: google/cloud-sdk\n  stage: deploy\n  script:\n  - echo \"$GOOGLE_KEY\" > key.json\n  - gcloud auth activate-service-account --key-file key.json\n  - gcloud config set compute/zone europe-west1-c\n  - gcloud config set project actuator-sample\n  - gcloud config set container/use_client_certificate True\n  - gcloud container clusters get-credentials actuator-sample\n  - kubectl delete secret registry.gitlab.com\n  - kubectl create secret docker-registry registry.gitlab.com --docker-server=https://registry.gitlab.com --docker-username=marcolenzo --docker-password=$REGISTRY_PASSWD --docker-email=lenzo.marco@gmail.com\n  - kubectl apply -f deployment.yml --namespace=staging\n  environment:\n    name: staging\n    url: https://example.staging.com\n  only:\n  - master\n\nk8s-deploy-production:\n  image: google/cloud-sdk\n  stage: deploy\n  script:\n  - echo \"$GOOGLE_KEY\" > key.json\n  - gcloud auth activate-service-account --key-file key.json\n  - gcloud config set compute/zone europe-west1-c\n  - gcloud config set project actuator-sample\n  - gcloud config set container/use_client_certificate True\n  - gcloud container clusters get-credentials actuator-sample\n  - kubectl delete secret registry.gitlab.com\n  - kubectl create secret docker-registry registry.gitlab.com --docker-server=https://registry.gitlab.com --docker-username=marcolenzo --docker-password=$REGISTRY_PASSWD --docker-email=lenzo.marco@gmail.com\n  - kubectl apply -f deployment.yml --namespace=production\n  environment:\n    name: production\n    url: https://example.production.com\n  when: manual\n  only:\n  - production\n```\n\nThe `environment` keyword associates the job with a specific environment, while the `url` element is used to generate a handy hyperlink to our application on the GitLab Environments page (found under your project's `Pipelines > Environments`). The `only` keyword signals to GitLab CI that the job should be executed only when the pipeline is building the listed branches. Finally, `when: manual` is used to turn the job execution from automatic to manual. Leaving this job automatic would put us in the world of [Continuous Deployment](/blog/continuous-integration-delivery-and-deployment-with-gitlab/#continuous-deployment) rather than [Continuous Delivery](/blog/continuous-integration-delivery-and-deployment-with-gitlab/#continuous-delivery). From a Kubernetes perspective, we are making use of `namespaces` to segregate the different environments.\n\nBy committing on `master` and `production` we [trigger a pipeline per environment](/blog/ci-deployment-and-environments/). As mentioned before, we are not making use of any collaboration tool because it is outside the scope of this tutorial. In real-world scenarios, we would use [merge requests](/topics/version-control/what-is-gitlab-flow/#merge-request) with [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) to move code across branches. Merge requests allow the team to review and discuss the changes before they get merged into the target branch. [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) take that one step further by spinning up dynamic environments for our merge requests, offering the team access to a deployed instance of our application without needing to check out the branch. This is extremely useful not only for non-technical members of the team, but also for collaborators and project managers who can preview the changes without having to clone the branch and install the app and its dependencies when evaluating a proposal.\n\n
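As a sketch of what such a dynamic environment could look like in this setup (this job is hypothetical and not part of the tutorial; `$CI_BUILD_REF_NAME` held the branch name in GitLab CI at the time, and the namespace trick assumes DNS-safe branch names):\n\n```yml\ndeploy-review:\n  image: google/cloud-sdk\n  stage: deploy\n  script:\n  # authenticate with gcloud exactly as in the jobs above, then:\n  - kubectl create namespace review-$CI_BUILD_REF_NAME || true\n  - kubectl apply -f deployment.yml --namespace=review-$CI_BUILD_REF_NAME\n  environment:\n    name: review/$CI_BUILD_REF_NAME\n  except:\n  - master\n  - production\n```\n\n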
```shell\ngit commit -am \"Showcasing Pipelines\"\ngit push origin master\ngit checkout -b production\ngit push origin production\n```\n\n![Pipelines](https://about.gitlab.com/images/blogimages/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes/pipelines.png){: .shadow}\n\nThe Pipelines screen details all pipeline executions. We can gather information about the branch and the individual result of each stage. In the case of the `production` pipeline, the `k8s-deploy-production` job is, as expected, not executed automatically, but it can be triggered from the GUI, from where we can also download the build artifacts.\n\n![Environments](https://about.gitlab.com/images/blogimages/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes/environments.png){: .shadow}\n\nEnvironments are listed on a separate page, from which it is possible to redeploy the latest version of an environment or to roll back to a particular version by accessing its details page.\n\n![Rollbacks](https://about.gitlab.com/images/blogimages/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes/rollbacks.png){: .shadow}\n\n\n## Conclusion\n\nIn this tutorial, we were able to create a [Continuous Delivery](https://en.wikipedia.org/wiki/Continuous_delivery) pipeline with ease thanks to the suite of [GitLab](/) products that supported us at every stage. [Spring Boot](https://projects.spring.io/spring-boot/) gave us agility by auto-configuring the application context and offering production-grade services out of the box. [Kubernetes](http://kubernetes.io/) abstracted away the compute resources and orchestration duties, allowing us to define only the desired deployment state. [GitLab CI](/solutions/continuous-integration/) was the core engine of our pipeline. Its declarative [`.gitlab-ci.yml`](https://docs.gitlab.com/ee/ci/yaml/) file allowed us to define, version and manage our pipelines while the GUI gave us full visibility and control.\n\nWhile this is a basic example, it clearly shows the immense benefits any team or company can gain by using the unified GUI of GitLab for issues, code review, CI and CD.\n\n## About Guest Author\n\n[Marco Lenzo](https://twitter.com/marco_lenzo) is a Software Architect always up for a challenge. He has expertise in transaction processing and platform as a service (PaaS). 
Java, Spring, Go and Kubernetes are currently his bread and butter.\n\n\u003C!-- closes https://gitlab.com/gitlab-com/blog-posts/issues/309 -->\n\u003C!-- cover image: https://unsplash.com/photos/G86MS2ZsiJA -->\n\n\u003Cstyle>\n  .h4 {\n    font-weight: bold;\n  }\n\u003C/style>\n",{"slug":8567,"featured":6,"template":678},"continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes","content:en-us:blog:continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes.yml","Continuous Delivery Of A Spring Boot Application With Gitlab Ci And Kubernetes","en-us/blog/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes.yml","en-us/blog/continuous-delivery-of-a-spring-boot-application-with-gitlab-ci-and-kubernetes",{"_path":8573,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8574,"content":8579,"config":8583,"_id":8585,"_type":16,"title":8586,"_source":17,"_file":8587,"_stem":8588,"_extension":20},"/en-us/blog/proposed-server-purchase-for-gitlab-com",{"title":8575,"description":8576,"ogTitle":8575,"ogDescription":8576,"noIndex":6,"ogImage":4861,"ogUrl":8577,"ogSiteName":692,"ogType":693,"canonicalUrls":8577,"schema":8578},"Proposed server purchase for GitLab.com","What hardware we're considering purchasing now that we have to move GitLab.com to metal.","https://about.gitlab.com/blog/proposed-server-purchase-for-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Proposed server purchase for GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2016-12-11\"\n      }",{"title":8575,"description":8576,"authors":8580,"heroImage":4861,"date":8581,"body":8582,"category":14},[2269],"2016-12-11","\n\nWe want to make GitLab.com fast and we [knew it was time to leave the cloud](/blog/why-choose-bare-metal/) and purchase our own servers.\nIn this post we lay out our thinking about what chassis, rack, memory, CPU, network, power, and hosting to buy.\nWe wanted to share what we learned and get your feedback on our proposal and questions.\nWhen you reply to a question in the comments on our blog or Hacker News, please reference it with the letter and number: 'Regarding R1'.\nWe'll try to update the questions with preliminary answers as we learn more.\n\n\u003C!-- more -->\n\n## Overview\n\nToday, GitLab.com hosts 96TB of data, and that number is growing rapidly. We\nare attempting to build a fault-tolerant and performant CephFS cluster. We are\nalso attempting to move GitLab application servers and supporting services\n(e.g. PostgreSQL) to bare metal.\n\nNote that for now our CI Runners will stay in the cloud. Not only are they\nmuch less sensitive to latency, but autoscaling is easier with a cloud service.\n\n### Chassis\n\nOne of the team members that will join GitLab in 2017 recommended using a [6028TP-HTTR SuperMicro 2U Twin2 server](https://www.supermicro.nl/products/system/2U/6028/SYS-6028TP-HTTR.cfm) chassis that has 4 dual processor nodes and is 2 [rack units](https://en.wikipedia.org/wiki/Rack_unit) (U) high. The advantages are:\n\n1. Great density, 0.5U per dual processor server\n1. You have one common form factor\n1. Power supplies are shared for great efficiency similar to [blade servers](https://en.wikipedia.org/wiki/Blade_server)\n1. The network is per node for more bandwidth and reliability (like an individual server)
\n\nWe use the [2U Twin2](https://www.supermicro.com/products/nfo/2UTwin2.cfm) instead of the [1U Twin](https://www.supermicro.com/products/nfo/1UTwin.cfm) because it fits one more 3.5\" hard drive (3 per node instead of 2).\n\nThis server is on the list of global SKUs for SuperMicro.\nWe'll also ask for quotes from other vendors to see if they have a competitive alternative.\nFor example, HPE has the [Apollo 2000 series](https://www.hpe.com/h20195/v2/getpdf.aspx/c04542552.pdf?ver=7).\n\nC1 Should we use another version of the chassis than HTTR?\n\nC2 What is the best Dell equivalent? => [C6320](http://www.dell.com/us/business/p/poweredge-c6320/pd)\n\n### Servers\n\nWe need the following servers:\n\n1. 32x File storage (CephFS OSD)\n1. 3x File Monitoring (CephFS MON)\n1. 8x Application server ([Unicorn](https://bogomips.org/unicorn/))\n1. 7x Background jobs ([Sidekiq](http://sidekiq.org/))\n1. 5x Key value store ([Redis Sentinel](https://redis.io/topics/sentinel))\n1. 4x Database (PostgreSQL)\n1. 3x Load balancers (HAProxy)\n1. 1x Staging\n1. 1x Spare\n\nFor a total of 64 nodes.\n\nWe would like to have one common node so that they are interchangeable.\nThis would mean installing only a few disks per node instead of having large fileservers.\nThis would distribute failures and IO.\n\n![IOPS on GitLab.com](https://about.gitlab.com/images/blogimages/write_iops.png)\n\nThe above picture shows the current number of Input/Output Operations Per\nSecond (IOPS) on GitLab.com. On our current NFS servers, our peak write IOPS\noften hit close to 500K, and our peak read IOPS reach 200K. These numbers\nsuggest that using spinning disks alone may not be enough; we need to use\nhigh-performance SSDs judiciously.\n\nOne task that we could not fit on the common nodes was PostgreSQL.\nOur current plan is to make PostgreSQL distributed in 2017 with the help of [Citus](https://www.citusdata.com/).\nBut for now, we need to scale vertically so we need a lot of memory and CPU.\nWe need at least a primary and secondary database.\nWe wanted to add a second pair for testing and to ensure spares in case of failure.\nDetails about this are in the following sections.\n\nChoosing a common node will mean that file storage servers will have too much CPU and that application servers will have too much disk space.\nWe plan to remedy that by running everything on Kubernetes.\nThis allows us to have a blended workload using all CPU and disk.\nFor example we can combine file storage and background jobs on the same server since one is disk heavy and one is CPU heavy.\nWe will start by having one workload per server to reduce complexity.\nThis means that when we need to grow we can still unlock almost twice as much disk space and CPU by blending the workloads.\nPlease note that this will be container-based; to get maximum IO performance we won't virtualize our workload.\n\nS1 Shall we spread the database servers among different chassis to make sure they don't all fail when one chassis fails?\n\nS2 Does Ceph handle running 60 OSD nodes well or can this cause problems?\n\n### CPU\n\nThe [SuperServer 6028TP-HTTR](https://www.supermicro.nl/products/system/2U/6028/SYS-6028TP-HTTR.cfm) supports dual E5-2600v4 processors per node.\nWe think the [E5-2630v4](http://ark.intel.com/products/92981/Intel-Xeon-Processor-E5-2630-v4-25M-Cache-2_20-GHz) is a good blend of power and cost.\nIt has 20 virtual cores at 2.20GHz, 25MB cache, and costs about $669 per processor.
\nEvery physical core is two virtual cores due to [hyperthreading](https://en.wikipedia.org/wiki/Hyper-threading).\nA slightly more powerful processor is the [E5-2640v4](https://ark.intel.com/products/92984/Intel-Xeon-Processor-E5-2640-v4-25M-Cache-2_40-GHz), but while the [SPECint score](https://en.wikipedia.org/wiki/SPECint) increases from 845 to 887, the cost increases from $669 to $939.\nYou can find the scores by entering a [search on spec.org](https://www.spec.org/cgi-bin/osgresults?conf=rint2006) with 'Hewlett Packard Enterprise' as the hardware vendor and looking for ProLiant DL360 Gen9 as the platform.\n\nOur current SQL server has one E5-2698B v3 with 32 virtual cores.\nPostgreSQL commonly uses about 20-25 virtual cores.\nMoving to dual processors should already help a lot.\nTo give us more months to grow before having to distribute the database we want to purchase some headroom.\nThat is why we're getting an [E5-2687Wv4](https://ark.intel.com/products/91750/Intel-Xeon-Processor-E5-2687W-v4-30M-Cache-3_00-GHz) for the database servers.\nThis processor costs $2100 instead of $670 but has 4 extra virtual cores and runs continuously at 3GHz instead of 2.2GHz.\nCompared to the E5-2630v4 that leads to a SPEC score of 1230 instead of 845, and 51.3 SPEC per virtual core instead of 42.3.\nFor the 4 dual processor database servers this upgrade will cost $11k.\nWe think it is worth it since the 20-40% of extra performance will buy us the extra month or two we need to distribute the database.\n\n### Disk\n\nEvery node can fit 3 larger (3.5\") hard drives.\nWe plan to purchase the largest one available, an 8TB Seagate with 6Gb/s SATA and 7.2K RPM.\nAt 60 nodes this will give us 1.4PB of raw storage.\nAt a replication factor of 3 for Ceph this is 480TB of usable storage.\nRight now GitLab.com uses 96TB (54TB for repos, 21TB for uploads, 21TB for LFS and build artifacts) so we can grow by a factor of almost 5.\n\nDisks can be slow so we looked at improving latency.\nHigher RPM hard drives typically come in [GB instead of TB sizes](http://www.seagate.com/enterprise-storage/hard-disk-drives/enterprise-performance-15k-hdd/).\nGoing all SSD is too expensive.\nTo improve latency we plan to fit every server with an SSD card.\nOn the fileservers this will be used as a cache.\nWe're thinking about using [Bcache](https://en.wikipedia.org/wiki/Bcache) for this.\n\nWe plan to use the [Intel DC P3700 series](http://www.intel.com/content/www/us/en/solid-state-drives/ssd-dc-p3700-spec.html) or the slightly less powerful [P3600 series](http://www.intel.com/content/www/us/en/solid-state-drives/ssd-dc-p3600-spec.html) of SSDs because they are recommended by the CephFS experts we hired.\nFor most servers it will be the [800GB SSDPEDMD800G4](http://www.supermicro.com/products/nfo/PCI-E_SSD.cfm?show=Intel).\nFor the database servers we plan to use the 1.6TB variant to have more headroom.\nThe endurance we need for the database server is 90TB/year; the 3600 series is already above 4PB of endurance.\n\nWe plan to add a 64GB [SSD SATADOM boot drive](https://www.supermicro.com/products/nfo/SATADOM.cfm) to the servers to boot from.\nThis way we can keep the large SSD as a separate volume.\n\nD1 We plan to configure the disks as just a bunch of disks (JBOD) but heard that this caused performance problems with some controllers. Is this likely to impact us?\n\nD2 Should we use Bcache to improve latency on the Ceph OSD servers with SSD? 
=> Make sure you're using a kernel >= 4.5, since that's when a bunch of stability patches landed (https://lkml.org/lkml/2015/12/5/38).\n\nD3 We heard concerns about fitting the PCIe 3.0 x 4 SSD card into [our chassis](https://www.supermicro.nl/products/system/2U/6028/SYS-6028TP-HTTR.cfm) that supports a PCI-E 3.0 x16 Low-profile slot. Will this fit? => [Florian Heigl](http://disq.us/p/1eedj2n): \"Somewhat unlikely you will be able to fit a P3700. I have a Twin^2 too and the only SSD I could fit there was a consumer NVME with a PCIe adapter board.\"\n\nD4 Should we ask for 8TB HGST drives instead of Seagate since they seem [more reliable](https://www.backblaze.com/blog/hard-drive-reliability-stats-q1-2016/)?\n\nD5 Is it a good idea to have a boot drive or should we use [PXE boot](https://en.wikipedia.org/wiki/Preboot_Execution_Environment) every time it starts? => [dsr_](https://news.ycombinator.com/item?id=13153336): You want a local boot drive, and you want it to fall back to PXE booting if the local drive is unavailable. Your PXE image should default to the last known working image, and have a boot-time menu with options for a rescue image and an installer for your distribution of choice.\n\nD6 Should we go for the 3700 series SSD or save some money and go for the 3600 series? Both for the normal and the SQL servers?\n\nD7 We're planning on one SSD per node. For the OSD nodes (file server) that would mean having the Ceph journal and bcache on the same SSD. Is this a good idea?\n\n### Memory\n\nSuppose one node runs both as application server and fileserver.\nWe recommend virtual cores + 1 instances of Unicorn of about 0.5GB each, for a total of 21GB per node (2 processors * 21 unicorns per processor * 0.5GB).\nCeph recommends about 1GB per TB of data, which comes out to 24GB per node.\nSo theoretically we can fit everything in 45GB, so 64GB should be enough.\n\nBut in practice we've seen 24TB OSD nodes use 79GB of memory.\nAnd the rule of thumb is to have about 2GB per virtual core available for background jobs (40GB).\nSo in order not to run too low we'll spend the extra $30k to have 128GB of ECC memory per node instead of 64GB.\n\nFor the SQL nodes we'll need much more memory: we currently give our database server 440GB and it uses all of that.\nThe database is about 250GB in size and growing by 40GB per month.\nAt 250GB of server memory we redlined the server, probably because the database no longer fit into memory.\nTheoretically the server supports 2TB of memory but it needs to fit in 16 memory slots per node.\nWe wanted to start with 1TB per server but we're not sure if we should go from a 64GB DIMM to 128GB to be able to expand later.\nBy having only half of the memory banks full you get half the bandwidth.\nAnd 64GB DIMMs already cost twice as much per GB as 32GB DIMMs, let alone 128GB ones.\nAt a price of about $940 per 64GB DIMM, the cost for 1TB of memory already is $15k per server.\n\nNote that larger sizes such as 64GB come in the form of LRDIMMs, which have a [small performance penalty](https://www.microway.com/hpc-tech-tips/ddr4-rdimm-lrdimm-performance-comparison/), but this looks acceptable.\n\nM1 Should we use 128GB DIMMs to be able to expand the database server later even though they will double the cost and halve the bandwidth?
\n\n### Network\n\nThe servers come with 2x 10Gbps RJ45 by default (Intel X540 Dual port 10GBase-T).\nWe want to [dual bond](https://docs.oracle.com/cd/E37670_01/E41138/html/ch11s05.html) the network connections to increase performance and reliability.\nThis will allow us to take routers out of service during low traffic times, for example to restart them after a software upgrade.\nWe think that 20Gbps is enough bandwidth to handle our data access and replication needs; right now our highest peaks are 1Gbps.\nThis is important because we want to have minimal latency between the Ceph servers, so network congestion would be a problem.\n\nCeph reference designs recommend a separated front and back network with the back network reserved for Ceph traffic.\nWe think that this is not needed as long as there is enough capacity.\nWe do want to have user request termination in a DMZ, so our HAProxy servers will be the only ones with a public IP.\n\nEach of the two physical network connections will connect to a different top of rack router.\nWe want to get a Software Defined Networking (SDN) compatible router so we have flexibility there.\nWe're considering the [10/40GbE SDN SuperSwitch (SSE-X3648S/SSE-X3648SR)](https://www.supermicro.com/products/accessories/Networking/SSE-X3648S.cfm) that can switch 1440 Gbps.\n\nApart from those routers we'll have a separate router for a 1Gbps management network, for example to make [STONITH](https://en.wikipedia.org/wiki/STONITH) reliable when there is a lot of traffic on the normal network.\nEach node already has a separate 1Gbps connection for this.\n\nWe have 64+1 nodes (1 for backup) and most routers seem to have 48 ports.\nEvery node has 2 network ports, so we need 130 ports in total.\nWe're not sure if we can use 3 routers with 48 ports each (144 in total) to cover that.\n\nN1 Which router should we purchase?\n\nN2 How do we interconnect the routers while keeping the network simple and fast?\n\nN3 Should we have a separate network for Ceph traffic?\n\nN4 Do we need an SDN compatible router or can we purchase something more affordable?\n\nN5 What router should we use for the management network?\n\n### Backup\n\nWe're still early in figuring out the backup solution so there are still lots of questions.\n\nBacking up 480TB of data (expected size in 2017) is pretty hard.\nWe thought about using [Google Nearline](https://cloud.google.com/storage-nearline/) because its price of $0.01 per GB per month means that for $4800 a month we don't have to worry about much.\nBut restoring that over a 1Gbps connection takes 44 days, way too long.\n\nWe mainly want our backup to protect us against human and software errors.\nBecause all the files are already replicated 3 times, hardware errors are unlikely to affect us.\nOf course we should have a good [Ceph CRUSH map](http://docs.ceph.com/docs/jewel/rados/operations/crush-map/) to prevent storing multiple copies on the same chassis.\n\nWe're most afraid of human error or Ceph corruption. For that reason we don't want to replicate on the Ceph level but on the file level.\n\n
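For reference, the back-of-envelope arithmetic behind the restore times in this section (decimal units, raw line rate, no protocol overhead):\n\n```shell\n# 480TB = 480,000GB = 3,840,000Gb; divide by link speed in Gbps, then by 86,400s/day\necho \"480 * 1000 * 8 / 1 / 86400\" | bc -l   # ~44.4 days at 1 Gbps\necho \"480 * 1000 * 8 / 20 / 86400\" | bc -l  # ~2.2 days at 20 Gbps\n```\n\n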
We're thinking about using [Bareos backup software](https://www.bareos.org/en/) to replicate to a huge fileserver.\nWe're inspired by the posts about the [latest 480TB Backblaze storage pod 6.0](https://www.backblaze.com/blog/open-source-data-storage-server/), and these are available for $6k without drives from [Backuppods](https://www.backuppods.com/).\nBut SuperMicro offers a [comparable solution in the form of a SuperChassis that can hold 90 drives](https://www.supermicro.com/products/chassis/4U/946/SC946ED-R2KJBOD).\nAt 8TB per drive that is 720TB of raw storage.\nEven with RAID overhead it should be possible to have 480TB of usable storage (66%).\n\nThe SuperChassis holds only hard drives; it still needs a controller. In a [reference architecture by Nexenta (PDF download)](https://nexenta.com/sites/default/files/docs/Nexenta_SMC_RA_DataSheet.pdf) two [SYS6028U](https://www.supermicro.com/products/system/2u/6028/sys-6028u-tr4_.cfm) with E5-2643v3 processors and 256GB of RAM are recommended. Unlike smaller configurations this one doesn't come with an SSD for [ZFS L2ARC](https://blogs.oracle.com/brendan/entry/test).\n\nSince backups are mostly linear we don't need an SSD for caching. In general 1GB of memory per TB of raw ZFS disk space is recommended. That would mean getting 512GB of RAM, 16x 32GB. Unlike the reference architecture we'll go with one controller. We're considering the [SuperServer 1028R-WC1RT](https://www.supermicro.com/products/system/1U/1028/SYS-1028R-WC1RT.cfm) since it is similar to our other servers: 1U, 2x 10Gbps, 16 DIMM slots, and 2 PCI slots. We'll use our regular [E5-2630v4](http://ark.intel.com/products/92981/Intel-Xeon-Processor-E5-2630-v4-25M-Cache-2_20-GHz) processor.\n\nThe question is whether this controller can saturate the 20 Gbps uplink.\nFor this it needs to use both 12 Gbps SAS buses.\nAnd each drive has to do at least 30 MBps, which seems reasonable for a continuous read.\n\nThe problem is that even at 20Gbps a full restore takes 2 days.\nOf course many times you need to restore only part of the files (uploads).\nAnd most of the time it won't contain 480TB (we'll start at about 100TB).\nThe question is whether we can accept this worst case scenario for GitLab.com.\n\nAn alternative would be to use multiple controllers.\nBut you can't aggregate ZFS pools over multiple servers.\nAnother option would be to have one controller with more IO.\nWe can use multiple disk enclosures and multiple SAS buses.\nAnd we can add more network ports and/or switch to 40Gbps.\nBut this all seems pretty complicated.\n\nB0 Are we on the right track here or is 20 Gbps of restore speed not OK?\n\nB1 Should we go for the [90 or 60 drive SuperChassis](https://www.supermicro.com/products/chassis/4U/?chs=946)? It looks like the 60-drive one has more peak power (1600W vs. 800W) to start the drives.\n\nB2 How should we configure the SuperChassis? [ZFS on Linux](http://zfsonlinux.org/) with [RAIDZ3](https://icesquare.com/wordpress/zfs-performance-mirror-vs-raidz-vs-raidz2-vs-raidz3-vs-striped/)?\n\nB3 Will the SuperChassis be able to saturate the 20Gbps connection?\n\nB4 Should we upgrade the networking on the SuperChassis to be able to restore even faster?\n\nB5 Is Bareos the right software to use?\n\nB6 How should we configure the backup software? Should we use incremental backups with parallel jobs to speed things up?
\n\nB7 Should we use the live filesystem or [CephFS snapshots](http://docs.ceph.com/docs/master/dev/cephfs-snapshots/) to back up from?\n\nB8 How common is it to have a tape or cloud backup in addition to the above?\n\nB9 Should we pick the top load model or [one of the front and rear access models](https://www.supermicro.com/products/chassis/JBOD/index.cfm?show=SELECT&storage=90)?\n\nB10 Can we connect two SAS cables to get 2x 12 Gbps?\n\nB11 What [HBA card](https://www.supermicro.com/products/nfo/storage_cards.cfm) should be added to the controller or does it come with an LSI 3108?\n\nB12 Is it smart to make the controller a separate 1U box or should we repurpose some of our normal nodes for this?\n\nB13 Any hints on how to test the backup restore (on AWS or our hardware, how often, etc.)?\n\n### Rack\n\nThe default rack height seems to be 45U nowadays (42U used to be the standard).\n\nIt is used as follows:\n\n- 32U for 16 chassis with 64 nodes\n- 3U for three network routers\n- 1U for the management network\n- 4U for the disk enclosure\n- 1U for the disk controller\n- 4U spare for 2 new chassis (maybe distributed PostgreSQL servers)\n\n### Power\n\nEach chassis has a 2000 watt power supply (which comes to 1kW per U), 32kW in total.\nNormal usage is guessed at 60% of the rated capacity, about 19kW.\nThat doesn't account for the routers and backup.\nBoth hosting providers quoted 4 x 208V 30A power feeds (2 for redundancy).\n\nP1 Does the quoted supply seem adequate for our needs?\n\n### Hosting\n\nWe've been working in [an issue](https://gitlab.com/gitlab-com/infrastructure/issues/732) to figure out where we should host.\n\nApart from the obvious (reliable, affordable) we had the following needs:\n\n- [AWS Direct Connect](https://aws.amazon.com/directconnect/details/) so we can use the cloud for temporary application server needs\n- Located on the east coast of the USA since it provides the best latency tradeoff for most of our users\n- Advanced remote hands service so we don't have to station people near the datacenter at all times\n- Ability to upgrade from one rack to a private cage\n\nThe following networking options are a plus:\n\n- Carrier neutral (all major global network providers in its meet-me facility)\n- Backbones to other locations to provide cheap 2nd site transit\n- CDN services to reduce origin bandwidth costs\n\nSo far we've gotten quotes from [QTS in Ashburn, VA](http://www.qtsdatacenters.com/data-centers/ashburn) and [NYI in Bridgewater, NJ](https://www.nyi.net/datacenters/new-jersey/).\n\nH1 Any watchouts when selecting hosting providers?\n\nH2 Should we install the servers ourselves or is it OK to let the hosting provider do that?\n\nH3 How can we minimize installation costs? Should we ask to configure the servers to PXE boot?\n\nH4 Is there an Azure equivalent for AWS Direct Connect? => Azure will let you work with a provider to \"peer into\" the Azure network at a data center of your choice. 
So for example we could pay to have a circuit established in a data center that was linked into the Azure 'US East 2' data center (where we currently host out of) for direct connectivity needs.\n\n### Expense\n\nWe can't give cost details since all the quotes we receive are confidential.\nThe cloud hosting for GitLab.com excluding GitLab CI is currently costing us about $200k per month.\nThe capital needed for going to metal would be less than we pay for 1 quarter of hosting.\nThe hosting facility costs look to be less than $10k per month.\nIf you spread the capital costs over 2.5 years (10 quarters), it is 10x cheaper to host your own.\n\nOf course the growth of GitLab.com will soon force us to buy additional hardware.\nBut we would also have to pay extra for additional cloud capacity.\nOur proposed buying plan is about 5x the capacity we need now.\nHaving your own hardware means you're always overprovisioned.\nAnd we could probably have reduced the cost of cloud hosting by focusing on it.\n\nThe bigger expense will be hiring more people to deal with the additional complexity; we'll probably need a couple of extra hires for this.\n\nWe looked into initially having disks in only half the servers, but that saves only $20k ($225 per disk) and it would create a lot of work when we eventually have to install them.\n\nE1 If we want to look at leasing should we do that through SuperMicro or a third party?\n\nE2 Are there ways we can save money?\n\n## Details\n\nOur detailed calculations and notes can be found in a [public Google sheet](https://docs.google.com/spreadsheets/d/1XG9VXdDxNd8ipgPlEr7Nb7Eg22twXPuzgDwsOhtdYKQ/edit#gid=894825456).\n",{"slug":8584,"featured":6,"template":678},"proposed-server-purchase-for-gitlab-com","content:en-us:blog:proposed-server-purchase-for-gitlab-com.yml","Proposed Server Purchase For Gitlab Com","en-us/blog/proposed-server-purchase-for-gitlab-com.yml","en-us/blog/proposed-server-purchase-for-gitlab-com",{"_path":8590,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8591,"content":8596,"config":8600,"_id":8602,"_type":16,"title":8603,"_source":17,"_file":8604,"_stem":8605,"_extension":20},"/en-us/blog/git-tips-and-tricks",{"title":8592,"description":8593,"ogTitle":8592,"ogDescription":8593,"noIndex":6,"ogImage":6582,"ogUrl":8594,"ogSiteName":692,"ogType":693,"canonicalUrls":8594,"schema":8595},"Git tips and tricks","Handy Git commands for everyday use","https://about.gitlab.com/blog/git-tips-and-tricks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git tips and tricks\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Achilleas Pipinellis\"}],\n        \"datePublished\": \"2016-12-08\"\n      }",{"title":8592,"description":8593,"authors":8597,"heroImage":6582,"date":8598,"body":8599,"category":14},[8273],"2016-12-08","\n\n[Git] comes with a ton of commands, and that's probably an understatement.\n\n[The internet] is full of Git tips and it's hard, if not impossible, to know\nthem all, but sometimes you stumble upon an aha! moment that changes your\nwhole workflow.\n\nIn this post, we gathered some Git tips and tricks we use at GitLab every day.\nHopefully they will lead you to your own aha! moment.\n\n\u003C!-- more -->\n\n- TOC\n{:toc}\n\n## Intro\n\nAlmost everybody at GitLab will need to use Git at some point. For newcomers\nwho know nothing about Git, that can be a fearsome experience. 
We have a\n[Git cheatsheet] and a `#git-help` chat channel where we ask questions and\nprovide help if some of us get stuck. That's a quick way to provide help, and\nif something is complicated or someone has messed up their local repository and\nneeds immediate help, there's always a person to jump on a quick call.\n\nHere's a pack of Git tricks that will level up your Git-fu and that you'll\nhopefully find useful. Remember, the list is far from exhaustive :)\n\n## Git's built-in help\n\nThe majority of users rely on sites like [StackOverflow] to find answers to their\nGit problems, but how often do you use Git's built-in help to find out more about a\ncommand you are struggling with?\n\n### The most common commands\n\nRun `git help` to print a list of the most common commands. You'll probably\nnotice you've used most of them, but how well do you really know them?\nThankfully, there is a help page for every command!\n\n### A help page for every command\n\nGit's documentation is comprehensive and is automatically installed with Git.\nRun `git help \u003Ccommand>` to find out all about a command's behavior and what\noptions it can take.\n\n### Git guides\n\nGit comes with a handful of guides ready for you to explore. Run `git help -g`\nto see what's available:\n\n```\nThe common Git guides are:\n\n   attributes   Defining attributes per path\n   everyday     Everyday Git With 20 Commands Or So\n   glossary     A Git glossary\n   ignore       Specifies intentionally untracked files to ignore\n   modules      Defining submodule properties\n   revisions    Specifying revisions and ranges for Git\n   tutorial     A tutorial introduction to Git (for version 1.5.1 or newer)\n   workflows    An overview of recommended workflows with Git\n```\n\nJump to a Git tutorial with `git help tutorial`, go through the glossary with\n`git help glossary` or learn about the most common commands with\n`git help everyday`.\n\n## See the repository status in your terminal's prompt\n\nIt's very useful to be able to visualize the status of your repository at any\ngiven time. While there are 3rd party tools that include this information\n([oh-my-zsh][ohmyzsh] anyone?), Git itself provides a script named `git-prompt.sh`\nthat does exactly that. You can [download it][gitprompt] and follow the\ninstructions in it to install and use it on your system. If you're using Linux\nand have installed Git with your package manager, it may already be\npresent on your system, usually under `/etc/bash_completion.d/`.\n\nGo ahead and replace your boring shell prompt with something like this:\n\n![Git shell prompt](https://about.gitlab.com/images/blogimages/git-tricks/git-shell-info.png){: .shadow}\n\n_Taken from oh-my-zsh's [themes wiki][git-shell-info-source]_\n\n## Autocompletion for Git commands\n\nYou may also find it useful to use the [completion scripts] that provide Git\ncommand completion for `bash`, `tcsh` and `zsh`. Again, follow the instructions\ninside the scripts to learn how to install them.\n\n
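On `bash`, for instance, the setup for both the prompt and the completion can be\na couple of lines in your `~/.bashrc`. A sketch, assuming you saved both scripts\nto your home directory (paths and the prompt format are just illustrations):\n\n```bash\nsource ~/git-completion.bash            # tab completion for git commands\nsource ~/git-prompt.sh                  # provides the __git_ps1 helper\nexport PS1='\\w$(__git_ps1 \" (%s)\")\\$ '  # working dir plus branch, e.g. ~/repo (master)$\n```\n\n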
If Git completion is enabled, typing\njust the first letter with `git p` followed by \u003Ckbd>Tab\u003C/kbd> will show the\nfollowing:\n\n```\npack-objects   -- create packed archive of objects\npack-redundant -- find redundant pack files\npack-refs      -- pack heads and tags for efficient repository access\nparse-remote   -- routines to help parsing remote repository access parameters\npatch-id       -- compute unique ID for a patch\nprune          -- prune all unreachable objects from the object database\nprune-packed   -- remove extra objects that are already in pack files\npull           -- fetch from and merge with another repository or local branch\npush           -- update remote refs along with associated objects\n```\n\nTo show all available commands, type `git` in your terminal followed by\n\u003Ckbd>Tab\u003C/kbd> + \u003Ckbd>Tab\u003C/kbd>, and see the magic happening.\n\n![It's a kind of magic](https://media.giphy.com/media/12NUbkX6p4xOO4/giphy.gif)\n\n## Git plugins\n\nSince Git is free software, it's easy for people to write scripts that extend\nits functionality. Let's see some of the most common ones.\n\n### The `git-extras` plugin\n\nIf you want to enhance Git with more commands, you'll want to try out the\n[`git-extras` plugin][gitextras]. It includes commands like `git info` (show\ninformation about the repository), `git effort` (number of commits per file),\nand the list goes on. After you [install][extras-inst] it, make sure to visit\nthe [documentation on the provided commands][commands] in order to understand\nwhat each one does before using it.\n\n### The `git-open` plugin\n\nIf you want to quickly visit the website on which the repository you're working on is\nhosted, `git-open` is for you. All major providers are supported (GitLab, GitHub,\nBitbucket) and you can even use them all at the same time if you set\nthem as different remotes.\n\n[Install it][install-open], and try it out by cloning a repository from\n[GitLab.com](https://gitlab.com/explore). From your terminal, navigate to that\nrepository and run `git open` to be transferred to the project's page on\nGitLab.com.\n\nIt works by default for projects hosted on GitLab.com, but you can also use it\nwith your own GitLab instances. In that case, make sure to set up the domain\nname with:\n\n```bash\ngit config gitopen.gitlab.domain git.example.com\n```\n\nYou can even open different remotes and branches if they have been set up.\nRead more in the [examples section][git-open-examples].\n\n## `.gitconfig` on steroids\n\nThe `.gitconfig` file contains information on how you want Git to behave in\ncertain circumstances. There are options you can set at the repository level,\nbut you can also set them in a global `.gitconfig` so that all local config\nwill inherit its values. This file usually resides in your home directory.\nIf not, you'll either have to create it manually, or it will be created automatically\nwhen you issue a command starting with `git config --global` as\nwe'll see below.\n\nYour very first encounter with `.gitconfig` was probably when you set your\nname and email address so that Git knows who you are.\nTo learn more about the options `.gitconfig` can take, see the [Git documentation\non `.gitconfig`][gitconfig].\n\nIf you are using macOS or Linux, `.gitconfig` will probably be hidden if you are\ntrying to open it from a file manager. 
Either make sure hidden files are\nshown, or open it with a command in the terminal: `atom ~/.gitconfig`.\n{: .alert .alert-info}\n\nLet's explore some of the most useful config options.\n\n### Set a global `.gitignore`\n\nIf you want to avoid committing files like `.DS_Store`, Vim `swp` files, etc.,\nyou can set up a global `.gitignore` file.\n\nFirst create the file:\n\n```bash\ntouch ~/.gitignore\n```\n\nThen run:\n\n```bash\ngit config --global core.excludesFile ~/.gitignore\n```\n\nOr manually add the following to your `~/.gitconfig`:\n\n```ini\n[core]\n  excludesFile = ~/.gitignore\n```\n\nGradually build up your own useful list of things you want Git to ignore. Read\nthe [gitignore documentation](https://git-scm.com/docs/gitignore) to find out\nmore.\n\n---\n\n_[Git docs source](https://git-scm.com/docs/git-config#git-config-coreexcludesFile)_\n\n### Delete local branches that have been removed from remote on fetch/pull\n\nYou might already have a bunch of stale branches in your local repository that\nno longer exist in the remote one. To delete them on each fetch/pull, run:\n\n```bash\ngit config --global fetch.prune true\n```\n\nOr manually add the following to your `~/.gitconfig`:\n\n```ini\n[fetch]\n  prune = true\n```\n\n---\n\n_[Git docs source](https://git-scm.com/docs/git-config#git-config-fetchprune)_\n\n### Enable Git's autosquash feature by default\n\nAutosquash makes it quicker and easier to squash or fixup commits during an\ninteractive rebase. It can be enabled for each rebase using\n`git rebase -i --autosquash`, but it's easier to turn it on by default.\n\n```bash\ngit config --global rebase.autosquash true\n```\n\nOr manually add the following to your `~/.gitconfig`:\n\n```ini\n[rebase]\n  autosquash = true\n```\n\nAt this point, let us remind you of [the perils of rebasing][rebase].\n{: .alert .alert-info}\n\n---\n\n_[Git docs source](https://git-scm.com/docs/git-config#git-config-rebaseautoSquash)_\n_([tip taken from thoughtbot](https://github.com/thoughtbot/dotfiles/pull/377))_\n\n### Extra info when using Git submodules\n\nIf you are using [submodules], it might be useful to turn on the submodule summary.\nFrom your terminal, run:\n\n```bash\ngit config --global status.submoduleSummary true\n```\n\nOr manually add the following to your `~/.gitconfig`:\n\n```ini\n[status]\n  submoduleSummary = true\n```\n\n---\n\n_[Git docs source](https://git-scm.com/docs/git-config#git-config-statussubmoduleSummary)_\n\n### Change the editor for Git's messages\n\nYou can change the default text editor used by Git commands.\n\nFrom `git help var`:\nthe order of preference is the `$GIT_EDITOR` environment variable, then\n`core.editor` configuration, then `$VISUAL`, then `$EDITOR`, and then the\ndefault chosen at compile time, which is usually `vi`.\n\nRunning `git config --show-origin core.editor` will tell you if `core.editor`\nis set and from which file. 
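For example, the output might look something like this (illustrative; the file path and value depend on your setup):\n\n```\n$ git config --show-origin core.editor\nfile:/home/user/.gitconfig      vim\n```\n\n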
Note that `--show-origin` needs at least Git 2.8.\n\nTo change it to your favorite editor (`vim`, `emacs`, `atom`, etc.), run:\n\n```bash\ngit config --global core.editor vim\n```\n\nOr manually add the following to your `~/.gitconfig`:\n\n```ini\n[core]\n  editor = vim\n```\n\n---\n\n_[Git docs source](https://git-scm.com/docs/git-config.html#git-config-coreeditor)_\n\n### Change the tool with which diffs are shown\n\n`git diff` is useful as it shows the changes that are not currently staged.\nWhen you run this command, Git uses its internal diff tool and displays\nthe changes in your terminal.\n\nIf you don't like the default difftool, there are a couple of others to choose\nfrom:\n\n- `vimdiff` - [Vim's built-in vimdiff](http://vimdoc.sourceforge.net/htmldoc/diff.html)\n- `magit` - [Emacs' most popular Git tool, Magit](https://www.emacswiki.org/emacs/Magit)\n- `meld` - [A visual diff and merge tool written in Python](http://meldmerge.org/)\n- `kdiff3` - [A diff and merge program written in Qt](http://kdiff3.sourceforge.net/)\n\nTo change the default tool for viewing diffs, run the following:\n\n```bash\ngit config --global diff.tool vimdiff\n```\n\nOr manually add the following to your `~/.gitconfig`:\n\n```ini\n[diff]\n  tool = vimdiff\n```\n\nAlso related is the `merge.tool` setting, which sets the program used for merge\nresolution. Similarly:\n\n```bash\ngit config --global merge.tool vimdiff\n```\n\nOr manually add the following to your `~/.gitconfig`:\n\n```ini\n[merge]\n  tool = vimdiff\n```\n\n---\n\n_[Git docs source](https://git-scm.com/docs/git-difftool)_\n\n## Aliases\n\nGit commands can take a lot of flags at a time. For example, for a log graph\nyou can use the following command:\n\n```bash\ngit log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr)%Creset' --abbrev-commit --date=relative\n```\n\nYou surely don't want to type this every time you need to run it. For that purpose,\nGit supports aliases, which are custom user-defined commands that build on top\nof the core ones. They are defined in `~/.gitconfig` under the `[alias]` group.\n\nOpen `~/.gitconfig` with your editor and start adding stuff.\n\n### Add an alias for pretty log graphs\n\nIn your `~/.gitconfig` add:\n\n```ini\n[alias]\n  lg = log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr)%Creset' --abbrev-commit --date=relative\n  lol = log --graph --decorate --pretty=oneline --abbrev-commit\n```\n\nNext time you want a pretty log graph, run `git lg` or `git lol`.\n\n### Add an alias to check out merge requests locally\n\nA merge request contains all the history from a repository, plus the additional\ncommits added to the branch associated with the merge request. Note that you\ncan check out a public merge request locally even if the source project is a fork\n(even a private fork) of the target project.\n\nTo check out a merge request locally, add the following alias to your `~/.gitconfig`:\n\n```ini\n[alias]\n  mr = !sh -c 'git fetch $1 merge-requests/$2/head:mr-$1-$2 && git checkout mr-$1-$2' -\n```\n\nNow you can check out a particular merge request from any repository and any\nremote. For example, to check out the merge request with ID 5 as shown in GitLab\nfrom the `upstream` remote, run:\n\n```bash\ngit mr upstream 5\n```\n\nThis will fetch the merge request into a local `mr-upstream-5` branch and check\nit out. 
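Under the hood, the alias simply expands to these two commands, with `upstream` and `5` substituted for `$1` and `$2`:\n\n```bash\ngit fetch upstream merge-requests/5/head:mr-upstream-5\ngit checkout mr-upstream-5\n```\n\n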
In the above example, `upstream` is the remote that points to GitLab;\nyou can list your remotes by running `git remote -v`.\n\n### The Oh-my-zsh Git aliases plugin\n\nIf you are an [Oh My Zsh][ohmyzsh] user, you'll probably know this already.\nLearn how you can [enable the Git plugin][zshgit] provided with Oh My Zsh and\nstart using the short commands to save time. Some examples are:\n\n- `gl`  instead of `git pull`\n- `gp`  instead of `git push`\n- `gco` instead of `git checkout`\n\n## Git command line tips\n\nHere's a list of Git tips we gathered.\n\n### An alias of `HEAD`\n\nDid you know `@` is the same as `HEAD`? Using it during a rebase is a lifesaver:\n\n```bash\ngit rebase -i @~2\n```\n\n### Quickly check out the previous branch you were on\n\nA dash (`-`) refers to the branch you were on before the current one.\nUse it to check out the previous branch ([source][dash]):\n\n```sh\n# Check out master\ngit checkout master\n\n# Create and check out a new branch\ngit checkout -b git-tips\n\n# Check out master\ngit checkout master\n\n# Check out the previous branch (git-tips)\ngit checkout -\n```\n\n### Delete local branches which have already been merged into master\n\nIf you are working every day on a project that gets contributions all the time,\nthe number of local branches grows without you noticing it. Run the following\ncommand to delete all local branches that are already merged into master\n([source][del-merged]):\n\n```bash\n# Make sure you have checked out master first\ngit checkout master\n\n# Delete branches merged into master, except master itself\ngit branch --merged master | grep -v \"master\" | xargs -n 1 git branch -d\n```\n\nIn the event that you accidentally delete master (💩 happens), get it back with:\n\n```bash\ngit checkout -b master origin/master\n```\n\n### Delete local branches that no longer exist in the remote repo\n\nTo remove all tracking branches that you have locally but are no longer present in\nthe remote repository (`origin`):\n\n```bash\ngit remote prune origin\n```\n\nUse the `--dry-run` flag to see which branches would be pruned without\nactually pruning them:\n\n```bash\ngit remote prune origin --dry-run\n```\n\nIf you want this to be run automatically every time you fetch/pull, see\n[how to add it to your `.gitconfig`](#delete-local-branches-that-have-been-removed-from-remote-on-fetchpull).\n\n### Checking out a new branch from a base branch\n\nYou can check out a new branch from a base branch without first checking out\nthe base branch. Confusing? Here's an example.\n\nIf you are on a branch named `old-branch` and you want to\ncheck out `new-branch` based off `master`, you'd normally do:\n\n```bash\ngit checkout master\ngit checkout -b new-branch\n```\n\nThere's a quicker way, though. While still on `old-branch`, run:\n\n```bash\ngit checkout -b new-branch master\n```\n\nThe pattern is the following:\n\n```bash\ngit checkout -b new_branch base_branch\n```\n\n## References\n\n- [Thoughtbot's gitconfig file](https://github.com/thoughtbot/dotfiles/blob/master/gitconfig)\n  contains useful tips, some of which are also present in this post\n- [A collection of Git tips](https://github.com/git-tips/tips/blob/master/README.md)\n- [Git and Vimdiff](http://usevim.com/2012/03/21/git-and-vimdiff/)\n- [Git's official site](https://git-scm.com/)\n\n## Conclusion\n\nAs always, writing about Git only scratches the surface. 
While some\nof the tips included in this post might come in handy, there is surely a lot\nof other stuff we're not familiar with.\n\n![Uncle Sam wants you to tell your trick](https://about.gitlab.com/images/blogimages/git-tricks/uncle-sam-wants-git.jpg){: .shadow}\n\n---\n\n_Image: \"[Branching illustration][img-url]\" by [Jason Long](https://twitter.com/jasonlong) is licensed under [CC BY 3.0](https://creativecommons.org/licenses/by/3.0/)_\n\n\u003C!-- Links -->\n\n[git-open-examples]: https://github.com/paulirish/git-open#examples\n[img-url]: https://github.com/git/git-scm.com/blob/master/public/images/branching-illustration%402x.png\n[install-open]: https://github.com/paulirish/git-open#installation\n[commands]: https://github.com/tj/git-extras/blob/master/Commands.md\n[gitextras]: https://github.com/tj/git-extras\n[zshgit]: https://github.com/robbyrussell/oh-my-zsh/wiki/Plugin:git\n[completion scripts]: https://github.com/git/git/tree/master/contrib/completion\n[gitprompt]: https://github.com/git/git/blob/master/contrib/completion/git-prompt.sh\n[git-shell-info-source]: https://github.com/robbyrussell/oh-my-zsh/wiki/Themes#kafeitu\n[del-merged]: http://stevenharman.net/git-clean-delete-already-merged-branches\n[dash]: https://twitter.com/holman/status/530490167522779137\n[ohmyzsh]: http://ohmyz.sh/\n[the internet]: /images/theinternet.png\n[gitconfig]: https://git-scm.com/docs/git-config\n[stackoverflow]: https://stackoverflow.com\n[extras-inst]: https://github.com/tj/git-extras/blob/master/Installation.md\n[rebase]: https://git-scm.com/book/en/v2/Git-Branching-Rebasing#The-Perils-of-Rebasing\n[submodules]: https://git-scm.com/book/en/v2/Git-Tools-Submodules\n[git]: https://git-scm.com\n[git cheatsheet]: https://gitlab.com/gitlab-com/marketing/raw/master/design/print/git-cheatsheet/print-pdf/git-cheatsheet.pdf\n",{"slug":8601,"featured":6,"template":678},"git-tips-and-tricks","content:en-us:blog:git-tips-and-tricks.yml","Git Tips And Tricks","en-us/blog/git-tips-and-tricks.yml","en-us/blog/git-tips-and-tricks",{"_path":8607,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8608,"content":8613,"config":8618,"_id":8620,"_type":16,"title":8621,"_source":17,"_file":8622,"_stem":8623,"_extension":20},"/en-us/blog/building-a-new-gitlab-docs-site-with-nanoc-gitlab-ci-and-gitlab-pages",{"title":8609,"description":8610,"ogTitle":8609,"ogDescription":8610,"noIndex":6,"ogImage":5308,"ogUrl":8611,"ogSiteName":692,"ogType":693,"canonicalUrls":8611,"schema":8612},"Building a new GitLab Docs site with Nanoc, GitLab CI, and GitLab Pages","How we built the new GitLab Docs portal from the ground up","https://about.gitlab.com/blog/building-a-new-gitlab-docs-site-with-nanoc-gitlab-ci-and-gitlab-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building a new GitLab Docs site with Nanoc, GitLab CI, and GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Connor Shea\"}],\n        \"datePublished\": \"2016-12-07\",\n      }",{"title":8609,"description":8610,"authors":8614,"heroImage":5308,"date":8616,"body":8617,"category":14},[8615],"Connor Shea","2016-12-07","\nWe recently rebuilt [docs.gitlab.com](https://docs.gitlab.com) from scratch. 
Where previously the site was generated with a simple Ruby script, we now use a proper static site generator.\n\nCheck out the improvements we made and the structure we now use to deploy from specific directories in multiple repositories to a single website, built with [GitLab CI](/solutions/continuous-integration/) and deployed with [GitLab Pages][pages]. Now our documentation has a nicer look and feel, is more pleasant to read through, and is simpler and quicker to maintain.\n\n\u003C!-- more -->\n\n- TOC\n{:toc}\n\n## Improvements\n\nThe old documentation website was pretty much just an HTML file, a stylesheet, and a [Ruby script][genrb] called `generate.rb`. While it worked, it was hard to update and not very flexible. It mostly lay dormant, only occasionally being touched by developers. The docs team really wanted to update the site to use a [static site generator](/blog/ssg-overview-gitlab-pages-part-3-examples-ci/) and take better advantage of [GitLab Pages][pages].\n\nWe chose [Nanoc](https://nanoc.ws/) because it’s fast, it comes with a number of built-in helpers and filters (as well as the ability to create custom ones), and it’s built with Ruby. Overall, we think this was definitely the right choice. The author was very responsive and addressed anything we brought up. Kudos to him on the great project!\n\nOther improvements include syntax highlighting with [Rouge](http://rouge.jneen.net/) (no syntax highlighting was used at all on the old site), breadcrumbs for navigating between pages, and an improved overall design – especially on mobile.\n\n## Requirements\n\nOur documentation site has some unique requirements that I haven’t seen mentioned or solved in any other companies’ blog posts. We have a few products with documentation we want to include in the site: Community Edition, Enterprise Edition, Omnibus GitLab, and GitLab Runner. In the future we’ll likely add more.\n\nEach product has its own repository with its own documentation directory. This allows developers to add documentation in the same merge request in which they add a new feature or change some behavior, which prevents documentation from becoming outdated.\n\nThe site also needed to be flexible enough that we could add versioning to it in the future. Eventually, our goal is to replace the Help section in CE/EE with this Docs site, so we need to maintain older versions of the documentation on the Docs site for users on older versions of GitLab.\n\n## The build process\n\nGiven the requirements and separate repositories, we decided we’d just need to clone the repositories as part of the build process.\n\nInside Nanoc's config file (`nanoc.yml`), we [have defined][nanocyaml] a hash of each of our products containing all the data we need. 
Here's an excerpt:\n\n```yaml\nproducts:\n  ce:\n    full_name: 'GitLab Community Edition'\n    short_name: 'Community Edition'\n    abbreviation: 'CE'\n    slug: 'ce'\n    index_file: 'README.*'\n    description: 'Browse user and administration documentation and guides for GitLab Community Edition.'\n    repo: 'https://gitlab.com/gitlab-org/gitlab-ce.git'\n    dirs:\n      temp_dir: 'tmp/ce/'\n      dest_dir: 'content/ce'\n      doc_dir:  'doc'\n\n...\n\n  runner:\n    full_name: 'GitLab Runner'\n    short_name: 'Runner'\n    abbreviation: 'RU'\n    slug: 'runner'\n    index_file: 'index.*'\n    description: 'Browse installation, configuration, maintenance, and troubleshooting documentation for GitLab Runner.'\n    repo: 'https://gitlab.com/gitlab-org/gitlab-runner.git'\n    dirs:\n      temp_dir: 'tmp/runner/'\n      dest_dir: 'content/runner'\n      doc_dir:  'docs'\n```\n\nWe then have the [Rakefile] where the repos are cloned and the directories that\nNanoc needs are created:\n\n```ruby\ndesc 'Pulls down the CE, EE, Omnibus and Runner git repos and merges the content of their doc directories into the nanoc site'\ntask :pull_repos do\n  require 'yaml'\n\n  # By default won't delete any directories, requires all relevant directories\n  # be empty. Run `RAKE_FORCE_DELETE=true rake pull_repos` to have directories\n  # deleted.\n  force_delete = ENV['RAKE_FORCE_DELETE']\n\n  # Parse the config file and create a hash.\n  config = YAML.load_file('./nanoc.yaml')\n\n  # Pull products data from the config.\n  ce = config[\"products\"][\"ce\"]\n  ee = config[\"products\"][\"ee\"]\n  omnibus = config[\"products\"][\"omnibus\"]\n  runner = config[\"products\"][\"runner\"]\n\n  products = [ce, ee, omnibus, runner]\n  dirs = []\n  products.each do |product|\n    dirs.push(product['dirs']['temp_dir'])\n    dirs.push(product['dirs']['dest_dir'])\n  end\n\n  if force_delete\n    puts \"WARNING: Are you sure you want to remove #{dirs.join(', ')}? [y/n]\"\n    exit unless STDIN.gets.index(/y/i) == 0\n\n    dirs.each do |dir|\n      puts \"\\n=> Deleting #{dir} if it exists\\n\"\n      FileUtils.rm_r(\"#{dir}\") if File.exist?(\"#{dir}\")\n    end\n  else\n    puts \"NOTE: The following directories must be empty otherwise this task \" +\n      \"will fail:\\n#{dirs.join(', ')}\"\n    puts \"If you want to force-delete the `tmp/` and `content/` folders so \\n\" +\n      \"the task will run without manual intervention, run \\n\" +\n      \"`RAKE_FORCE_DELETE=true rake pull_repos`.\"\n  end\n\n  dirs.each do |dir|\n    unless \"#{dir}\".start_with?(\"tmp\")\n\n      puts \"\\n=> Making an empty #{dir}\"\n      FileUtils.mkdir(\"#{dir}\") unless File.exist?(\"#{dir}\")\n    end\n  end\n\n  products.each do |product|\n    temp_dir = File.join(product['dirs']['temp_dir'])\n    puts \"\\n=> Cloning #{product['repo']} into #{temp_dir}\\n\"\n\n    `git clone #{product['repo']} #{temp_dir} --depth 1 --branch master`\n\n    temp_doc_dir = File.join(product['dirs']['temp_dir'], product['dirs']['doc_dir'], '.')\n    destination_dir = File.join(product['dirs']['dest_dir'])\n    puts \"\\n=> Copying #{temp_doc_dir} into #{destination_dir}\\n\"\n    FileUtils.cp_r(temp_doc_dir, destination_dir)\n  end\nend\n```\n\nThe `pull_repos` task inside the Rakefile is pretty self-explanatory if you know\nsome Ruby, but here's what it does:\n\n1. `nanoc.yml` is loaded since it contains the information we need for the\n   various products:\n\n    ```ruby\n    config = YAML.load_file('./nanoc.yaml')\n    ```\n\n1. 
The product data is pulled from the config:\n\n    ```ruby\n    ce = config[\"products\"][\"ce\"]\n    ee = config[\"products\"][\"ee\"]\n    omnibus = config[\"products\"][\"omnibus\"]\n    runner = config[\"products\"][\"runner\"]\n    ```\n\n1. The directories to be created (or deleted) are collected in an array:\n\n    ```ruby\n    products = [ce, ee, omnibus, runner]\n    dirs = []\n    products.each do |product|\n      dirs.push(product['dirs']['temp_dir'])\n      dirs.push(product['dirs']['dest_dir'])\n    end\n    ```\n\n1. The empty directories are created:\n\n    ```ruby\n    dirs.each do |dir|\n      unless \"#{dir}\".start_with?(\"tmp\")\n\n        puts \"\\n=> Making an empty #{dir}\"\n        FileUtils.mkdir(\"#{dir}\") unless File.exist?(\"#{dir}\")\n      end\n    end\n    ```\n\n1. We finally copy the contents of the documentation directory (defined by\n   `doc_dir`) for each product from `tmp/` to `content/`:\n\n    ```ruby\n    products.each do |product|\n      temp_dir = File.join(product['dirs']['temp_dir'])\n      puts \"\\n=> Cloning #{product['repo']} into #{temp_dir}\\n\"\n\n      `git clone #{product['repo']} #{temp_dir} --depth 1 --branch master`\n\n      temp_doc_dir = File.join(product['dirs']['temp_dir'], product['dirs']['doc_dir'], '.')\n      destination_dir = File.join(product['dirs']['dest_dir'])\n      puts \"\\n=> Copying #{temp_doc_dir} into #{destination_dir}\\n\"\n      FileUtils.cp_r(temp_doc_dir, destination_dir)\n    end\n    ```\n\n   `content/` is where Nanoc looks for the actual site’s Markdown files. To prevent the `tmp/` and `content/` subdirectories from being pushed after testing the site locally, they’re excluded by `.gitignore`.\n\nIn the future we may speed this up further by caching the `tmp` folder in CI. The task would need to be updated to check if the local repository is up-to-date with the remote, only cloning if they differ.\n\nNow that all the needed files are in order, we run `nanoc` to build the static site (the full local build is condensed into a short sketch at the end of this section). Nanoc runs each Markdown file through a series of [filters][nanoc-filters] defined by rules in the [`Rules` file][rules]. We currently use [Redcarpet][] as the Markdown parser along with Rouge for syntax highlighting, as well as some custom filters. We plan on [moving to Kramdown as our Markdown parser in the future](https://gitlab.com/gitlab-org/gitlab-docs/issues/50) as it provides some nice stuff like user-defined Table of Contents, etc.\n\nWe also define some filters inside the [`lib/filters/` directory][filtersdir],\nincluding one that [replaces any `.md` extension with `.html`][md2html].\n\nThe Table of Contents (ToC) is generated for each page except when it's named `index.md`\nor `README.md` as we usually use these as landing pages to index other\ndocumentation files and we don't want them to have a ToC. 
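Before moving on to the `Rules` file, here's what the whole local build boils down to, condensed into shell commands (a sketch; `nanoc view` serves the compiled site locally):\n\n```bash\n# Clone the product repos and copy their doc directories into content/.\n# RAKE_FORCE_DELETE clears tmp/ and content/ without prompting (see the Rakefile above).\nRAKE_FORCE_DELETE=true rake pull_repos\n\n# Compile the site into public/ and preview it on http://localhost:3000.\nnanoc\nnanoc view\n```\n\n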
This ToC handling and some\nother options that Redcarpet provides [are defined in the `Rules` file][redrules].\n\nFor more on the specifics of building a site with Nanoc, see [the Nanoc tutorial](https://nanoc.ws/doc/tutorial/).\n\n## Taking advantage of GitLab to put everything together\n\nThe new docs portal is hosted on GitLab.com at \u003Chttps://gitlab.com/gitlab-org/gitlab-docs>.\nIn that project we create issues, discuss things, open merge requests in feature\nbranches, iterate on feedback and finally merge things into the `master` branch.\nAgain, the documentation source files are not stored in this repository; if\nyou want to contribute, you'll have to open a merge request in the respective\nproject.\n\nThere are three key things we use to test, build, deploy, and host the Nanoc site,\nall built into GitLab: [GitLab CI](/solutions/continuous-integration/), [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/)\nand [GitLab Pages][pages].\n\nLet's break it down into pieces.\n\n### GitLab CI\n\nGitLab CI is responsible for all the stages that we go through to publish\nnew documentation: test, build and deploy.\n\nNanoc has a built-in system of [Checks](https://nanoc.ws/doc/testing/), including HTML/CSS and internal/external link validation. With GitLab CI we test with the internal link checker (set to [`allow failure`][allowfail]) and also verify that the site compiles without errors. We also run an [SCSS linter](https://github.com/sasstools/sass-lint) to make sure our SCSS looks uniform.\n\nOur full [`.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab-docs/blob/master/.gitlab-ci.yml) file looks like this. We'll break it down below to make it clear what it's doing:\n\n```yaml\nimage: ruby:2.3\n\n## Cache the vendor/ruby directory\ncache:\n  key: \"ruby-231\"\n  paths:\n  - vendor/ruby\n\n## Define the stages\nstages:\n  - test\n  - deploy\n\n## Before each job's script is run, run the commands below\nbefore_script:\n  - ruby -v\n  - bundle install --jobs 4 --path vendor\n\n## Make sure the site builds successfully\nverify_compile:\n  stage: test\n  script:\n    - rake pull_repos\n    - nanoc\n  artifacts:\n    paths:\n      - public\n    expire_in: 1w\n  except:\n    - master\n  tags:\n    - docker\n\n## Check for dead internal links using Nanoc's built-in tool\ninternal_links:\n  stage: test\n  script:\n    - rake pull_repos\n    - nanoc\n    - nanoc check internal_links\n  allow_failure: true\n  tags:\n    - docker\n\n## Make sure our SCSS stylesheets are correctly defined\nscss_lint:\n  stage: test\n  script:\n    - npx sass-lint '**/*.scss' -v\n  tags:\n    - docker\n\n## A job that deploys a review app to a dedicated server running Nginx.\nreview:\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  before_script: []\n  cache: {}\n  script:\n    - rsync -av --delete public /srv/nginx/pages/$CI_BUILD_REF_NAME\n  environment:\n    name: review/$CI_BUILD_REF_NAME\n    url: http://$CI_BUILD_REF_NAME.$APPS_DOMAIN\n    on_stop: review_stop\n  only:\n    - branches@gitlab-org/gitlab-docs\n  except:\n    - master\n  tags:\n    - nginx\n    - review-apps\n\n## Stop the review app\nreview_stop:\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  before_script: []\n  artifacts: {}\n  cache: {}\n  dependencies: []\n  script:\n    - rm -rf public /srv/nginx/pages/$CI_BUILD_REF_NAME\n  when: manual\n  environment:\n    name: review/$CI_BUILD_REF_NAME\n    action: stop\n  only:\n    - branches@gitlab-org/gitlab-docs\n  except:\n    - master\n  tags:\n    - nginx\n    - review-apps\n\n## Deploy the 
static site to GitLab Pages\npages:\n  stage: deploy\n  environment:\n    name: production\n    url: https://docs.gitlab.com\n  script:\n    - rake pull_repos\n    - nanoc\n    # Symlink all README.html to index.html\n    - for i in `find public -name README.html`; do ln -sf README.html $(dirname $i)/index.html; done\n  artifacts:\n    paths:\n    - public\n    expire_in: 1h\n  only:\n    - master@gitlab-org/gitlab-docs\n  tags:\n    - docker\n```\n\nTo better visualize how the jobs are run, take a look at how the pipeline\ngraph looks for [one of the pipelines][pipeline].\n\n![Pipeline graph example](https://about.gitlab.com/images/blogimages/new-gitlab-docs-site/pipeline-graph.png){: .shadow}\n\nLet's see what all these settings mean.\n\nFor more information, you can read the [documentation on `.gitlab-ci.yml`][ciyaml].\n{: .alert .alert-info}\n\n---\n\nDefine the Docker image to be used:\n\n```yaml\nimage: ruby:2.3\n```\n\n[Cache] the vendor/ruby directory so that we don't have to install the\ngems for each job/pipeline:\n\n```yaml\ncache:\n  key: \"ruby-231\"\n  paths:\n  - vendor/ruby\n```\n\nDefine the [stages] the jobs will run in:\n\n```yaml\nstages:\n  - test\n  - deploy\n```\n\nBefore each job's script is run, run the commands that are defined in the\n[`before_script`][before_script]. Display the Ruby version and install\nthe needed gems:\n\n```yaml\nbefore_script:\n  - ruby -v\n  - bundle install --jobs 4 --path vendor\n```\n\nIn the `verify_compile` job we make sure the site builds successfully.\nIt first pulls the repos locally, then runs `nanoc` to compile the site.\nThe `public/` directory, where the static site is built, is uploaded as\nan artifact so that it can pass between stages. We define an expiry date of\none week. The job runs on all refs except master. The `docker` tag ensures that\nthis job is picked by the shared Runners on GitLab.com:\n\n```yaml\nverify_compile:\n  stage: test\n  script:\n    - rake pull_repos\n    - nanoc\n  artifacts:\n    paths:\n      - public\n    expire_in: 1w\n  except:\n    - master\n  tags:\n    - docker\n```\n\nIn the `internal_links` job we check for dead internal links using Nanoc's\nbuilt-in functionality. We first need to pull the repos and compile the static\nsite. We allow it to fail since the sources of the dead links are in\ndifferent repositories, not closely related to the current one.\nThe `docker` tag ensures that this job is picked by the shared Runners\non GitLab.com:\n\n```yaml\ninternal_links:\n  stage: test\n  script:\n    - rake pull_repos\n    - nanoc\n    - nanoc check internal_links\n  allow_failure: true\n  tags:\n    - docker\n```\n\nThe `scss_lint` job makes sure our SCSS stylesheets are correctly defined by\nrunning a linter on them. The `docker` tag ensures that this job is picked by\nthe shared Runners on GitLab.com:\n\n```yaml\nscss_lint:\n  stage: test\n  script:\n    - npx sass-lint '**/*.scss' -v\n  tags:\n    - docker\n```\n\nNext, we define the Review Apps.\n\n### Review Apps\n\nWhen opening a merge request for the docs site, we use a new feature called [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) to test changes. This lets us test new features, style changes, new sections, etc., by deploying the updated static site to a test domain. 
On every merge request whose jobs have all finished successfully, we can see a link with the URL to the temporarily deployed docs site.\n\n![Review apps](https://about.gitlab.com/images/blogimages/gitlab-docs-review-apps-screenshot.png){: .shadow}\n\nWe define two additional jobs for that purpose in `.gitlab-ci.yml`:\n\n```yaml\nreview:\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  before_script: []\n  cache: {}\n  script:\n    - rsync -av --delete public /srv/nginx/pages/$CI_BUILD_REF_NAME\n  environment:\n    name: review/$CI_BUILD_REF_NAME\n    url: http://$CI_BUILD_REF_NAME.$APPS_DOMAIN\n    on_stop: review_stop\n  only:\n    - branches@gitlab-org/gitlab-docs\n  except:\n    - master\n  tags:\n    - nginx\n    - review-apps\n\nreview_stop:\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  before_script: []\n  artifacts: {}\n  cache: {}\n  dependencies: []\n  script:\n    - rm -rf public /srv/nginx/pages/$CI_BUILD_REF_NAME\n  when: manual\n  environment:\n    name: review/$CI_BUILD_REF_NAME\n    action: stop\n  only:\n    - branches@gitlab-org/gitlab-docs\n  except:\n    - master\n  tags:\n    - nginx\n    - review-apps\n```\n\nThey both run on all branches except `master` since `master` is deployed straight\nto production. Once someone with write access to the repository pushes a branch\nand creates a merge request, if the jobs in the `test` stage finish successfully,\nthe `review` job deploys the code of that particular branch to a server. The\nserver is set up to [use Nginx with Review Apps][nginx-example], and it uses\nthe artifacts from the previous `verify_compile` job, which contain the\n`public/` directory with the HTML files Nanoc compiled.\n\nNotice that both jobs rely on [dynamic environments][environments], and with\nthe `review/` prefix we can group them under the [Environments page](https://gitlab.com/gitlab-org/gitlab-docs/environments).\n\nThe `review_stop` job depends on the `review` one and is called whenever we\nwant to clean up the review app. By default it is called every time the related\nbranch is deleted, but you can also call it manually with the buttons found\nin GitLab.\n\nThe trick of this particular setup is that we use the shared Runners provided\non GitLab.com to test and build the docs site (using Docker containers), whereas\nwe use a specific Runner that is set up on the server that hosts the Review Apps\nand is configured with the [shell executor]. GitLab CI knows which Runner to use\neach time from the `tags` we provide each job with.\n\nThe `review` job also has some other things specified:\n\n```yaml\nvariables:\n  GIT_STRATEGY: none\nbefore_script: []\ncache: {}\n```\n\nIn this case, [`GIT_STRATEGY`][gitstrategy] is set to `none` since we don't need to\ncheck out the repository for this job. We only use `rsync` to copy over the\nartifacts that were passed from the previous job to the server where Review\nApps are deployed. We also turn off the `before_script` since we don't need it\nto run; the same goes for `cache`. They are both defined globally, so you need to pass\nan empty array and an empty hash, respectively, to disable them at the job level.\n\nOn the other hand, setting the `GIT_STRATEGY` to `none` is necessary on the\n`review_stop` job so that the GitLab Runner won't try to check out the code after\nthe branch is deleted. 
We also define one additional thing in it:\n\n```yaml\ndependencies: []\n```\n\nSince this is the last job that is performed in the lifecycle of a merge request\n(after it's merged and the branch deleted), we opt not to download any artifacts\nfrom the previous stage by passing an empty array in [`dependencies`][deps].\n\n---\n\nSee [our blog post on Review Apps](/blog/introducing-review-apps/) for\nmore information about how they work and their purpose. Be sure to also check\nthe [Review Apps documentation][radocs] as well as [how dynamic environments work][environments]\nsince they are the basis of Review Apps.\n\nThe final step after the site gets successfully built is to deploy to\nproduction, which is under the URL everybody knows: \u003Chttps://docs.gitlab.com>.\nFor that purpose, we use [GitLab Pages][pages].\n\n### GitLab Pages\n\n[GitLab Pages](https://pages.gitlab.io/) hosts [static websites](https://en.wikipedia.org/wiki/Static_web_page) and can be used with any static site generator, including [Jekyll](https://jekyllrb.com/), [Hugo](https://gohugo.io/), [Middleman](https://middlemanapp.com/), [Pelican](http://blog.getpelican.com/), and of course Nanoc.\n\nGitLab Pages allows us to create the static site dynamically since it just deploys the `public` directory after the GitLab CI task is done. The job responsible for this is named `pages`.\n\nA production environment is set up with the URL of the docs portal.\nThe script pulls the repos and runs `nanoc` to compile the static site.\nThe `public/` directory, where the static site is built, is uploaded as\nan artifact so that it can be deployed to GitLab Pages. We define an expiry\ndate of one hour, and the job runs only on the `master` branch.\nThe `docker` tag ensures that this job is picked by the shared Runners\non GitLab.com.\n\n```yaml\npages:\n  stage: deploy\n  environment:\n    name: production\n    url: https://docs.gitlab.com\n  script:\n    - rake pull_repos\n    - nanoc\n    # Symlink all README.html to index.html\n    - for i in `find public -name README.html`; do ln -sf README.html $(dirname $i)/index.html; done\n  artifacts:\n    paths:\n    - public\n    expire_in: 1h\n  only:\n    - master@gitlab-org/gitlab-docs\n  tags:\n    - docker\n```\n\nGitLab Pages deploys our documentation site whenever a commit is made to the `master` branch of the gitlab-docs project.\n\nSince the documentation content itself is not hosted under the gitlab-docs repository, we rely on a CI job in each of the products we build the docs site from. We specifically [make use of triggers][triggers] where a build for the docs site is triggered whenever CI runs successfully on the master branches of CE, EE, Omnibus GitLab, or Runner. If you go to the [pipelines page of the gitlab-docs project][pipelines-docs], you'll notice the word **triggered** next to the pipelines that are re-run because a trigger was initiated.\n\n![Pipeline triggers](https://about.gitlab.com/images/blogimages/new-gitlab-docs-site/pipelines-triggers.png){: .shadow}\n\nHow we specifically use triggers for gitlab-docs is briefly described in the\n[project's readme][readme-triggers].\n\nWe also use a hack to symlink all `README.html` files into `index.html` so that\nthey can be viewed without the extension. 
Notice how the following links point\nto the same document:\n\n- \u003Chttps://docs.gitlab.com/ee/ci/yaml/index.html>\n- \u003Chttps://docs.gitlab.com/ee/ci/yaml/>\n\nThe line responsible for this is:\n\n```bash\nfor i in `find public -name README.html`; do ln -sf README.html $(dirname $i)/index.html; done\n```\n\nThe artifacts are made to [expire in] an hour since they are deployed to the\nGitLab Pages server; we don't need them lingering in GitLab forever.\n\nIt’s worth noting that GitLab Pages is a [GitLab Enterprise Edition](/stages-devops-lifecycle/)-only feature, but it’s also available for free on GitLab.com.\n{: .alert .alert-info}\n\n## Conclusion\n\nHopefully this shows some of GitLab's power and how having everything integrated into one cohesive product simplifies one's workflow. If you have a complex documentation site you’d like to put together from specific directories in multiple Git repositories, the process described above is the best we've been able to come up with. If you have any ideas to make this system better, let us know!\n\nThe documentation website is [open source](https://gitlab.com/gitlab-org/gitlab-docs), available under the MIT License. You’re welcome to take a look at it, submit a merge request, or even fork it to use it with your own project.\n\nThanks for reading! If you have any questions, we’d be happy to answer them in the comments.\n\n\u003C!-- Cover image: https://unsplash.com/photos/G6G93jtU1vE -->\n\n[genrb]: https://gitlab.com/gitlab-com/doc-gitlab-com/blob/master/generate.rb\n[nanocyaml]: https://gitlab.com/gitlab-org/gitlab-docs/blob/30f13e6a81bf9baeda95204b5524c6abf980b1e5/nanoc.yaml#L101-149\n[Rakefile]: https://gitlab.com/gitlab-org/gitlab-docs/blob/30f13e6a81bf9baeda95204b5524c6abf980b1e5/Rakefile\n[md2html]: https://gitlab.com/gitlab-org/gitlab-docs/blob/30f13e6a81bf9baeda95204b5524c6abf980b1e5/lib/filters/markdown_to_html_ext.rb\n[redrules]: https://gitlab.com/gitlab-org/gitlab-docs/blob/30f13e6a81bf9baeda95204b5524c6abf980b1e5/Rules#L33-51\n[redcarpet]: https://github.com/vmg/redcarpet\n[allowfail]: https://docs.gitlab.com/ee/ci/yaml/#allow_failure\n[ciyaml]: https://docs.gitlab.com/ee/ci/yaml/\n[environments]: https://docs.gitlab.com/ee/ci/environments/index.html#dynamic-environments\n[pipeline]: https://gitlab.com/gitlab-org/gitlab-docs/pipelines/5266794\n[nginx-example]: https://gitlab.com/gitlab-examples/review-apps-nginx\n[radocs]: https://docs.gitlab.com/ee/ci/review_apps/index.html\n[shell executor]: https://docs.gitlab.com/runner/executors/shell.html\n[triggers]: https://docs.gitlab.com/ee/ci/triggers/\n[pipelines-docs]: https://gitlab.com/gitlab-org/gitlab-docs/pipelines\n[readme-triggers]: https://gitlab.com/gitlab-org/gitlab-docs/blob/master/README.md#deployment-process\n[gitstrategy]: https://docs.gitlab.com/ee/ci/runners/configure_runners.html#git-strategy\n[expire in]: https://docs.gitlab.com/ee/ci/yaml/#artifacts-expire_in\n[deps]: https://docs.gitlab.com/ee/ci/yaml/#dependencies\n[pages]: https://pages.gitlab.io\n[nanoc-filters]: https://nanoc.ws/doc/reference/filters/\n[rules]: https://gitlab.com/gitlab-org/gitlab-docs/blob/30f13e6a81bf9baeda95204b5524c6abf980b1e5/Rules\n[filtersdir]: https://gitlab.com/gitlab-org/gitlab-docs/tree/30f13e6a81bf9baeda95204b5524c6abf980b1e5/lib/filters\n[cache]: https://docs.gitlab.com/ee/ci/yaml/#cache\n[stages]: https://docs.gitlab.com/ee/ci/yaml/#stages\n[before_script]: 
https://docs.gitlab.com/ee/ci/yaml/#before_script\n",{"slug":8619,"featured":6,"template":678},"building-a-new-gitlab-docs-site-with-nanoc-gitlab-ci-and-gitlab-pages","content:en-us:blog:building-a-new-gitlab-docs-site-with-nanoc-gitlab-ci-and-gitlab-pages.yml","Building A New Gitlab Docs Site With Nanoc Gitlab Ci And Gitlab Pages","en-us/blog/building-a-new-gitlab-docs-site-with-nanoc-gitlab-ci-and-gitlab-pages.yml","en-us/blog/building-a-new-gitlab-docs-site-with-nanoc-gitlab-ci-and-gitlab-pages",{"_path":8625,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8626,"content":8632,"config":8636,"_id":8638,"_type":16,"title":8639,"_source":17,"_file":8640,"_stem":8641,"_extension":20},"/en-us/blog/how-to-keep-your-fork-up-to-date-with-its-origin",{"title":8627,"description":8628,"ogTitle":8627,"ogDescription":8628,"noIndex":6,"ogImage":8629,"ogUrl":8630,"ogSiteName":692,"ogType":693,"canonicalUrls":8630,"schema":8631},"How to keep your fork up to date with its origin","GitLab Repository Mirroring: the best way to keep your fork up-to-date!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666587/Blog/Hero%20Images/fork.png","https://about.gitlab.com/blog/how-to-keep-your-fork-up-to-date-with-its-origin","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to keep your fork up to date with its origin\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2016-12-01\",\n      }",{"title":8627,"description":8628,"authors":8633,"heroImage":8629,"date":8634,"body":8635,"category":14},[8399],"2016-12-01","\nThis blog post has not been updated since it was published in 2016, so it contains outdated information. For an up-to-date source,\nplease see the documentation on [Repository Mirroring](https://docs.gitlab.com/ee/user/project/repository/mirror/index.html).\n{:.alert .alert-warning .black}\n\nHave you ever wondered how to keep a fork automatically up-to-date with its origin (**upstream**)?\n\nIn this post we'll show you how to do that on GitLab!\n\n\u003C!-- more -->\n\n## Fork\n\nAs you most likely know, a **fork** is a copy of a Git repository \"connected\" with the project you forked from (upstream). When you collaborate on code, it's pretty common to fork a project, clone it to your local machine, make your changes, push them to your fork, and submit a [merge request (MR)](/solutions/continuous-integration/) to merge your code into the original project.\n\nYou fork a repository whenever you want to contribute to a project which you don't have access to, as it's not your own or your team's. This is how open source projects hosted by GitLab get so much collaboration from the community.\n\nWhen you are a member of a project (or a group), it's easier to use the [GitLab Flow](/topics/version-control/what-is-gitlab-flow/) _branching strategy_, since anyone in your team can pull your branch and preview your changes locally. By the way, with our brand-new solution for this, called [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/), you can preview your apps on a per-branch basis, for each MR submitted to GitLab, directly from a link generated by GitLab. Review Apps bring the [GitLab Workflow](/topics/version-control/what-is-gitlab-flow/) to the next level! 
🙌\n\n## The Problem\n\nWhen you fork a project and clone it to your local computer, you'll eventually need to update it with the `master` branch of the upstream project (or whatever the default branch is set to). That way you will be able to check out an up-to-date branch to work from. If you do not do that, you are very likely to stumble upon merge conflicts in the end, since the copy of the code you're working on could be out-of-date.\n\nTo prevent this, first you'd need to pull the upstream, push to the remote, check out a new branch from the now up-to-date `master` branch, and only then start working on your changes. This takes time, can cause a lot of pain if you forget to do it, and turns out to be annoying if you need to do it multiple times a day. That's why we could use a better solution.\n\n## The Solution\n\nGitLab can do that for you with no pain! Yay! What you need to do is very simple: enable [GitLab Repository Mirroring](https://docs.gitlab.com/ee/user/project/repository/mirror/index.html)!\n\n\u003Ci class=\"far fa-arrow-alt-circle-right gitlab-purple\" aria-hidden=\"true\">\u003C/i>\n**First**. Mirror your fork:\n\nUnder your forked project's **Settings**, navigate to **Mirror Repository**:\n\n![settings - mirror repository](https://about.gitlab.com/images/blogimages/how-to-keep-your-fork-up-to-date-with-its-origin/mirror-repository-settings.png){:.shadow}\n\n\u003Ci class=\"far fa-arrow-alt-circle-right gitlab-purple\" aria-hidden=\"true\">\u003C/i>\n**Second**. Add the upstream's path to the field **Git repository URL**, then enable automatic mirroring:\n\n![fork - enable automatic repository update](https://about.gitlab.com/images/blogimages/how-to-keep-your-fork-up-to-date-with-its-origin/setup-automatic-mirror.png){:.shadow}\n\n\u003Ci class=\"far fa-arrow-alt-circle-right gitlab-purple\" aria-hidden=\"true\">\u003C/i>\n**Third**. Set up the **mirror user**: it's the user to whom new commits pulled from the upstream project will be attributed in your fork.\n\n\u003Ci class=\"fas fa-check-circle-o\" aria-hidden=\"true\" style=\"color: green\">\u003C/i>\n**Done**! Once an hour, GitLab will pull the upstream for you, and keep your fork up-to-date!\n\n![fork - repo successfully mirrored](https://about.gitlab.com/images/blogimages/how-to-keep-your-fork-up-to-date-with-its-origin/repository-mirrored.png){:.shadow}\n\nBy doing so, you only need to proceed as you usually do for your own projects (pull, check out a new branch, and push your commits). All the rest GitLab does for you!\n\nSimple, isn't it?\n\n## The Secret\n\nIn order to pull without authentication from an upstream repository you are contributing to, the project should be public in the first place. Also, the **Repository**'s **Feature Visibility** settings of the upstream project need to be set to **Everyone with access**:\n\n![feature visibility settings](https://about.gitlab.com/images/blogimages/how-to-keep-your-fork-up-to-date-with-its-origin/feature-visibility-settings.png){:.shadow}\n\nYou can find them in your project's **Settings** (\u003Ci class=\"fas fa-cog\" aria-hidden=\"true\" style=\"color: grey\">\u003C/i>) > **Edit Project**.\n\nNote that you can mirror private projects as well, but you'd need to be a member of the project or the group it belongs to.\n\n## What is Your Solution?\n\nWe would love to know how you do that! Do you have a different solution? You can certainly help others. 
Please share it in the comments, so everyone in the community can decide which solution is best for them. Thank you!\n\n## Conclusion\n\nHopefully, we provided you with an easy solution for keeping your fork up-to-date. Remember, you can even mirror repositories hosted on other Git platforms!\n\nCool! I'm looking forward to hearing from you: feedback, questions, and suggestions are very welcome! Leave your comment below, and/or tweet at us [@GitLab](https://twitter.com/gitlab)! We \u003Ci class=\"fas fa-heart gitlab-purple\" aria-hidden=\"true\">\u003C/i> our community!\n\n\u003C!-- cover image: https://unsplash.com/photos/8yqds_91OLw -->\n",{"slug":8637,"featured":6,"template":678},"how-to-keep-your-fork-up-to-date-with-its-origin","content:en-us:blog:how-to-keep-your-fork-up-to-date-with-its-origin.yml","How To Keep Your Fork Up To Date With Its Origin","en-us/blog/how-to-keep-your-fork-up-to-date-with-its-origin.yml","en-us/blog/how-to-keep-your-fork-up-to-date-with-its-origin",{"_path":8643,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8644,"content":8650,"config":8654,"_id":8656,"_type":16,"title":8657,"_source":17,"_file":8658,"_stem":8659,"_extension":20},"/en-us/blog/gitlab-is-a-slam-dunk",{"title":8645,"description":8646,"ogTitle":8645,"ogDescription":8646,"noIndex":6,"ogImage":8647,"ogUrl":8648,"ogSiteName":692,"ogType":693,"canonicalUrls":8648,"schema":8649},"'GitLab is a slam dunk': One team lead weighs his options","Developer Warren Postma shares his opinions on VCS, after years of trying out all the alternatives.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663798/Blog/Hero%20Images/gitlab-is-a-slam-dunk.jpg","https://about.gitlab.com/blog/gitlab-is-a-slam-dunk","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"'GitLab is a slam dunk': One team lead weighs his options\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2016-11-27\",\n      }",{"title":8645,"description":8646,"authors":8651,"heroImage":8647,"date":8652,"body":8653,"category":14},[6728],"2016-11-27","\nWarren Postma is a team lead and \"de facto DevOps guy\" at [RamSoft](https://www.ramsoft.com/), but like so many he's gotten hooked on [contributing](https://gitlab.com/warren.postma) to GitLab in his spare time. After becoming familiar with GitHub, Atlassian, and Mercurial in previous jobs, he felt strongly that Git and GitLab were the best choice for his current team. Since reaching that conclusion, he's also assisted his peers and former colleagues in their switch to GitLab, so I wanted to hear his opinions - they're both strong and numerous, which made for a fun conversation.\n\n\u003C!--more-->\n\nHere are some highlights:\n* In a team distributed across timezones and languages, the decentralized nature of Git and GitLab is a huge win.\n* If you pair people for code review who are in the same timezone, you can prevent having merge requests stay open for days or weeks.\n* When switching to GitLab CI, the biggest barrier is training and removing ingrained habits - especially if people are also new to Git.\n\n**Can you briefly describe the kinds of projects you use GitLab for?**\n{: .alert .alert-info}\n\n**Warren:** I use it for both personal projects and for my job. 
I switched a team at a small healthcare company from Subversion to Git, taught some of the team members their first steps in Git, and I switched from preferring Mercurial (another DVCS) to preferring Git, chiefly because I really like GitLab. There is nothing like GitLab for Mercurial although there is a project called RhodeCode and its fork Kallithea that comes close.  I spun up our private GitLab instance and taught our IT people how to maintain it (not hard really).\n\nI also have counseled other people to switch to GitLab, and I'm generally most active in doing so among peers. I know a guy who runs a small software company and I'm helping him switch his team over to GitLab. It feels like I could be finding my niche.\n\n**What is the makeup of your team at RamSoft, and what is your workflow like?**\n{: .alert .alert-info}\n\n**Warren:** Our workflow and culture is evolving rapidly. My team is multi-cultural, multi-language, multi-everything, in multiple physical sites.  One part of our team is in Toronto, another part is in Vietnam. The decentralized nature of Git and GitLab is a major win for us. We have two merge request (MR) pipelines. We have all Toronto devs code-review via MR with someone in the same timezone because our MRs should be short-lived MRs, reviewed and merged quickly.  We don't have MRs that stay open for days or weeks.  We don't use WIP much yet, but I am starting to advocate for WIP merge requests for collaborative feature development. Since we're in the same office, we can mostly work in pairs when our work requires it. GitLab provides a history of our work, and our reviews, and MRs are like a project history.\n\n**How did you decide to get started with GitLab?**\n{: .alert .alert-info}\n\n**Warren:** It was easy, I chose Ubuntu (I can't remember if it was 12 or 14) LTS and it was around GitLab 8.0, September 2015 or so, and I simply copied the installation commands from the website and pasted it into terminal.  I needed a bit of help from my IT guy to set up LDAP to our ActiveDirectory, and we were good.\nOur company is 99% composed of Windows-only technical people, I'm a Linux guy working in a Windows company.  I'd say that in many companies this is a significant barrier to getting up and running.  If I could suggest something, it would be to provide a Hyper-V and a VMWare image of a working private GitLab, which can be downloaded and run by anyone. I think that should be done in-house as opposed to via Bitnami. I have used Bitnami products, and I have nothing against them, but I see the opportunity for GitLab to provide technical support even to smaller corporations as a significant opportunity to GitLab, that it should be taking on.\n\n**Is there anything you feel you can do in GitLab that you can't do with other tools?**\n{: .alert .alert-info}\n\n**Warren:** GitLab is about integration of tools. I don't want to set everything up separately. GitLab provides an all-in-one version control server, bug tracker (issues), kanban board (issues in progress, completed, in production), merge request handling (code reviews). All those tools exist in many flavors, but none so well integrated, and I can self-host.\n\nYour closest competitor is Bitbucket, in my opinion, which can be run privately but the private Bitbucket is closed-source I believe, and you can't get it for free, so GitLab CE is really without peer. 
Since I don't work in very large companies, GitLab EE is something I haven't spent much time on, but I can see that even a serious small company might want to buy GitLab EE because having support is a major feature. If I wasn't working at my current company, the level of Linux knowledge in house would take a major dive, and they'd probably want to buy a support contract to fix their GitLab if it went down, or help diagnose major issues.\n\nPrivately Hosted (on my computer or VM) + Free + OpenSource = Made of Win. I can't see choosing anything else. I see GitHub as problematic, something I [blogged about]( http://linuxcodemonkey.blogspot.ca/2016/11/gitlab-all-things.html)\n\n**What is the main reason you would recommend GitLab to another dev?**\n{: .alert .alert-info}\n\n**Warren:** Once the choice is made to use Git, to me, GitLab is a slam dunk decision. Software companies, whether small, medium, or large, and even single developers who are professionals who get paid, should be hosting their own in-house GitLab, and perhaps having that back up to some off-site repo (say Bitbucket or GitLab.com). Single developers could be quite happy with Bitbucket also. I happen to still love Bitbucket, but happen to distrust GitHub. I only use GitHub when I have to use it, which is to make MRs to projects that are on GitHub.\n\nPeople who I have talked to, and I have even given presentations to people at user groups, are usually interested in GitLab because:\n\n1. It lets you host your own private Git server.\n2. It has a pretty impressive set of features, issue tracking, merge request handling, and [continuous integration server](https://about.gitlab.com/topics/ci-cd/continuous-integration-server/) built in.\n\nI am working on a community blog post on switching a small healthcare software company from Jenkins to GitLab CI. So far, I have found that the only difficulty is in training team members. Moving ingrained practices and working against ingrained tendencies is the hardest thing. We previously had a single-branch trunk-based monorepo culture, and switching to Git, to me, was only sensible if we changed our practices to work how Git is meant to be used.\n\nA pet peeve of mine can be summed up in the saying \"When all you have is a hammer, everything looks like a nail.\" In the world of Subversion and Git, what I see over and over is people who take a 10 - 50 GiB Subversion monorepo and then just \"import all 50 GiB, and all 500,000 svn commit revs into Git\". They are seldom impressed with the results of this, and it seldom occurs to them to question their initial preconception that this was the right and obvious way to move from SVN to Git. I am not sure if there exists a comprehensive re-education plan for Subversion users, but I think perhaps I should write one. There was a start towards this when Joel Spolsky tried something called \"[hginit.com](http://hginit.com/)\", retraining Subversion victims in how to work with Mercurial. A similar practice for corporate teams moving from Subversion to Git would be a great educational resource that I think GitLab could provide. Some pretty good material in that vein is already on the internet and is provided by Atlassian, as part of their Bitbucket docs.\n\nIn terms of companies and products that I consider to be almost \"peers\" of GitLab, perhaps Bitbucket is closest. I prefer GitLab because the product is open source, the people are great, and the growing community around GitLab is also great. Oh and the product is growing at a fantastic pace. 
Just watching it from mid 2015 to today, the pace of innovation has been boggling. I have also collaborated with GitLab community members to create add-ons and tools. One guy made a Python-based tool that uses the GitLab API to [expire and delete old artifacts](https://github.com/JonathonReinhart/gitlab-artifact-cleanup) *site-wide*. Another guy made a Go-based tool that copies Issues and Issue labels from one project to another. All great plugins from the community!\n\n_If your team uses GitLab and is interested in sharing your story, please fill out this [form]( https://docs.google.com/a/gitlab.com/forms/d/1K8ZTS1QvSSPos6mVh1ol8ZyagInYctX3fb9eglzeK70/edit)  and we’ll get in touch!_\n\n_Follow Warren on [Twitter](https://twitter.com/warrenpostma)_\n\n_Tweet [@GitLab](https://twitter.com/gitlab) and check out our [job openings](/jobs/)._\n",{"slug":8655,"featured":6,"template":678},"gitlab-is-a-slam-dunk","content:en-us:blog:gitlab-is-a-slam-dunk.yml","Gitlab Is A Slam Dunk","en-us/blog/gitlab-is-a-slam-dunk.yml","en-us/blog/gitlab-is-a-slam-dunk",{"_path":8661,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8662,"content":8668,"config":8673,"_id":8675,"_type":16,"title":8676,"_source":17,"_file":8677,"_stem":8678,"_extension":20},"/en-us/blog/why-choose-bare-metal",{"title":8663,"description":8664,"ogTitle":8663,"ogDescription":8664,"noIndex":6,"ogImage":8665,"ogUrl":8666,"ogSiteName":692,"ogType":693,"canonicalUrls":8666,"schema":8667},"How we knew it was time to leave the cloud","How we're solving storage and performance issues as we scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683600/Blog/Hero%20Images/data.png","https://about.gitlab.com/blog/why-choose-bare-metal","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we knew it was time to leave the cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pablo Carranza\"}],\n        \"datePublished\": \"2016-11-10\",\n      }",{"title":8663,"description":8664,"authors":8669,"heroImage":8665,"date":8671,"body":8672,"category":14},[8670],"Pablo Carranza","2016-11-10","\n\n{::options parse_block_html=\"true\" /}\n\nIn my last [infrastructure update][infra-post], I documented our challenges with\nstorage as GitLab scales. We built a CephFS cluster to tackle both the capacity\nand performance issues of NFS and decided to replace PostgreSQL standard Vacuum\nwith the pg_repack extension. Now, we're feeling the pain of running a high\nperformance distributed filesystem on the cloud.\n\nOver the past month, we loaded a lot of projects, users, and CI artifacts onto\nCephFS. We chose CephFS because it's a reliable distributed file system that can\ngrow capacity to the petabyte, making it virtually infinite, and we needed\nstorage. By going with CephFS, we could push the solution into the infrastructure\ninstead of creating a complicated application. The problem with CephFS is that\nin order to work, it needs to have a really performant underlaying infrastructure\nbecause it needs to read and write a lot of things really fast.\nIf one of the hosts delays writing to the journal, then the rest of the fleet is\nwaiting for that operation alone, and the whole file system is blocked. 
When this happens,\nall of the hosts halt, and you have a locked file system; no one can read or\nwrite anything and that basically takes everything down.\n\n![osd-journal-latency](https://about.gitlab.com/images/blogimages/osd-journal-latency.png)\n\nWhat we learned is that when you get into the consistency, availability, and\npartition tolerance (CAP) trade-offs of CephFS, it will just give away availability in\nexchange for consistency. We also learned that when you put a lot of pressure on\nthe system, it will generate hot spots. For example, in specific places in the\ncluster of machines hosting the GitLab CE repo, all the reads and\nwrites end up hitting the same spot during high-load times. This problem is\namplified because we hosted the system in the cloud, where there is no minimum\nSLA for IO latency.\n\n## Performance Issues on the Cloud\n\nBy choosing to use the cloud, we are by default sharing infrastructure with a\nlot of other people. The cloud is timesharing, i.e., you share the\nmachine with others on the provider's resources. As such, the provider has to\nensure that everyone gets a fair slice of the time share. To do this, providers\nplace performance limits and thresholds on the services they provide.\n\nOn our server, GitLab can perform at most 20,000 IOPS, but the guaranteed minimum is 0.\nWith this performance capacity, we became the \"noisy neighbors\" on the shared\nmachines, using all of the resources. We became the neighbor who plays their\nmusic loud and really late. So, we were punished with latencies. Providers don't\nguarantee a minimum IOPS, so they can just drop you. Whenever we wanted the disk\nto actually reach the data, we could have to wait out 100 ms of latency.\n[That's basically telling us to wait 8 years][space-time-article]. What we found\nis that the cloud was not meant to provide the level of IOPS performance we needed\nto run an aggressive system like CephFS.\n\nAt a small scale, the cloud is cheaper and sufficient for many projects.\nHowever, if you need to scale, it's not so easy. It's often sold as, \"If you\nneed to scale and add more machines, you can spawn them because the cloud is\n'infinite'\". What we discovered is that yes, you can keep spawning more\nmachines, but there is a threshold in time, particularly when you're adding heavy\nIOPS, where it becomes less effective and very expensive. You'll still have to\npay for bigger machines. The nature of the cloud is time sharing, so you still\nwill not get the best performance. When it comes down to it, you're paying a lot\nof money to get a subpar level of service while still needing more performance.\n\nSo, what happens when the cloud is just not enough?\n\n## Moving to Bare Metal\n\nAt this point, moving to dedicated hardware makes sense for us. From a cost\nperspective, it is more economical and reliable because of how the culture of\nthe cloud works and the level of performance we need. Of course, hardware comes\nwith its upfront costs: components will fail and need to be replaced. This\nrequires services and support that we don't have today. You have to\nknow the hardware you are getting into and put a lot more effort into keeping it\nalive. But in the long run, it will make GitLab more efficient, consistent,\nand reliable, as we will have more ownership of the entire infrastructure.\n\n## How We Proactively Uncover Issues\n\nAt GitLab, we are able to proactively uncover issues like this because we are\nbuilding an observable system as a way to understand how\nour system behaves. 
The machine is doing a lot of things, most of which we are\nnot even aware of. To get a deeper look at what's happening, we gather data and\nmetrics into Prometheus to build dashboards and observe trends.\n\nThese metrics live in the depths of the kernel and are not readily visible to humans.\nTo see them, you need to build a system that allows you to pull, aggregate, and\ngraph this data in a way you can see it. Graphs are great because you can get a\nlot of data on one screen and read it with a simple glance.\n\nFor example, our fleet overview dashboard shows how the different workers are\nperforming in one view:\n\n![workers-load](https://about.gitlab.com/images/blogimages/workers-load.png)\n\n![workers-wait](https://about.gitlab.com/images/blogimages/workers-wait.png)\n\n### How we used our dashboard to understand CephFS in the cloud\n\n\nBelow, you can see OSD journal latency over the last 7 days, including a spike.\n\n![osd-journal-latency-one-week](https://about.gitlab.com/images/blogimages/osd-journal-latency-one-week.png)\n\nThis is how much time we spent trying to write to this journal disk. In general,\ncommitting data to this journal takes roughly 2 to 12 seconds. You can\nsee where it jumps to 42 seconds to complete -- that delay is where we are being\npunished. The high spikes show when GitLab.com is down.\n\nWhat's great about having this dashboard is that there is a lot of data available\nquickly, in one place. Non-technical people can understand this. This is the\nlevel of insight into your system you want to aim for. You can build your own\nwith [Prometheus][prometheus]. We have been building this for the last month, and it's close to its\nend state. We're still working on it to add more things.\n\nThis is how we make informed decisions and understand as best as we can what is\ngoing on with our infrastructure. Whenever we see\na service failing or performing in an unexpected way, we pull together a\ndashboard to highlight the underlying data to help us understand what's happening\nand how things are being impacted on a larger scale. Usually monitoring is an afterthought,\nbut we are changing this by shipping more and more detailed and comprehensive\nmonitoring with GitLab. Without detailed monitoring, you are just guessing at\nwhat is going on within your environment and systems.\n\nThe bottom line is that once you have moved beyond a handful of systems, it is no\nlonger feasible to run one-off commands to try to understand what is happening\nwithin your infrastructure. True insight can only be gained by having enough\ndata to make informed decisions with.\n\n\n## Recap: What We Learned\n\n1. CephFS gives us more scalability and, ostensibly, performance, but it did not work well in the cloud on shared resources, despite tweaking and tuning it to try to make it work.\n1. There is a threshold of performance on the cloud, and if you need more, you will have to pay a lot more, be punished with latencies, or leave the cloud.\n1. Moving to dedicated hardware is more economical and reliable for the scale and performance of our application.\n1. Building an observable system by pulling and aggregating performance data into understandable dashboards helps us spot non-obvious trends and correlations, which lets us address issues faster.\n1. Monitoring some things can be really application-specific, which is why we are [building our own gitlab-monitor Prometheus exporter][prom-exporter]. 
We plan to ship this with GitLab CE soon.\n\n\u003C!-- identifiers -->\n\n[infra-post]: /blog/infrastructure-update/\n[prom-exporter]: https://gitlab.com/gitlab-org/omnibus-gitlab/issues/1481\n[prometheus]: https://prometheus.io/\n[space-time-article]: https://blog.codinghorror.com/the-infinite-space-between-words/\n",{"slug":8674,"featured":6,"template":678},"why-choose-bare-metal","content:en-us:blog:why-choose-bare-metal.yml","Why Choose Bare Metal","en-us/blog/why-choose-bare-metal.yml","en-us/blog/why-choose-bare-metal",{"_path":8680,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8681,"content":8686,"config":8690,"_id":8692,"_type":16,"title":8693,"_source":17,"_file":8694,"_stem":8695,"_extension":20},"/en-us/blog/publish-code-coverage-report-with-gitlab-pages",{"title":8682,"description":8682,"ogTitle":8682,"ogDescription":8682,"noIndex":6,"ogImage":8683,"ogUrl":8684,"ogSiteName":692,"ogType":693,"canonicalUrls":8684,"schema":8685},"Publish code coverage report with GitLab Pages","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672293/Blog/Hero%20Images/code-coverage-report-stats.png","https://about.gitlab.com/blog/publish-code-coverage-report-with-gitlab-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Publish code coverage report with GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Grzegorz Bizon\"}],\n        \"datePublished\": \"2016-11-03\",\n      }",{"title":8682,"description":8682,"authors":8687,"heroImage":8683,"date":8688,"body":8689,"category":14},[3444],"2016-11-03","\n\nAt GitLab, we believe that everyone can contribute. We also use automated\ntesting extensively to make contributing to GitLab easier. Using automated\ntesting is a great way to improve confidence when someone needs to change\nthe code, which actually is the case in the majority of contributions to\nsoftware projects.\n\nBut how do we ensure that our test suite covers enough to aid the confidence\nin changing behavior of the software, and what can we do to keep on improving\nit?\n\n\u003C!-- more -->\n\n## What is code coverage?\n\nUsing the [code coverage](https://en.wikipedia.org/wiki/Code_coverage) metric is a\ntechnique that helps to improve the test suite, development process, and the software itself.\n\nTools used to measure the code coverage percentage usually extend the test harness\nenvironment and make it possible to map the application execution process\nback to the [source code](/solutions/source-code-management/) while automated tests are being executed. With that\napproach, you can not only learn how much of your code is covered by tests,\nbut it is also possible to find out what exact parts of the codebase are not\ncovered well enough.\n\nSome coverage analysis tools also make it possible to generate code coverage reports in HTML\nformat that you can then view in your browser. 
It makes it much easier to\ninspect the areas of code that are missing tests and are likely to need some\nimprovements as well.\n\nYou can take a look at the Ruby [code coverage report for GitLab](http://gitlab-org.gitlab.io/gitlab-ce/coverage-ruby/)\nthat is hosted on [GitLab Pages](https://pages.gitlab.io).\n\n![Code coverage report summary](https://about.gitlab.com/images/blogimages/publish-code-coverage-report-with-gitlab-pages/code-coverage-report-file-summary.png)\n\n## How to generate a code coverage report\n\nThere are a lot of code coverage tools available for many different languages,\nand you will need to find the most appropriate option for your particular needs. At GitLab, with\nprojects using Ruby, we often use [SimpleCov](https://github.com/colszowka/simplecov).\n\nYou will need to check the documentation for your tool of choice to learn how to\ngenerate the code coverage report. Once you are able to do this locally,\ncheck out the rest of this tutorial to learn how to publish the report with\n[GitLab Pages](https://pages.gitlab.io)!\n\nFor the sake of this example, we will assume that you are using Ruby with RSpec\nand SimpleCov.\n\n### How to configure your tools\n\nConfiguring SimpleCov can be as simple as extending your `spec_helper.rb` with:\n\n```ruby\nrequire 'simplecov'\nSimpleCov.start\n```\n\nWhen you run the `rspec` command, you will notice the code coverage report being\ngenerated when tests are completed. The RSpec example below comes from a very simple\nproject that contains a single test for its single class:\n\n\u003Ci class=\"far fa-file-code\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n`spec/dog_spec.rb`\n\n```ruby\nrequire 'spec_helper'\nrequire_relative '../dog'\n\ndescribe Dog do\n  it 'barks' do\n    expect(subject.bark).to eq 'Woof, woof!'\n  end\nend\n```\n\n\u003Ci class=\"far fa-file-code\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n`dog.rb`\n\n```ruby\nclass Dog\n  def bark\n    'Woof, woof!'\n  end\nend\n```\n\nAnd the RSpec test harness output is:\n\n```text\nDog\n  barks\n\nFinished in 0.00058 seconds (files took 0.08804 seconds to load)\n1 example, 0 failures\n\nCoverage report generated for RSpec to /tmp/coverage_example/coverage. 6 / 6 LOC (100.0%) covered.\n```\n\nAt the end of the output, you can see that the code coverage report was generated\nin the `coverage/` directory, whose contents look like:\n\n```bash\n$ ls coverage/\nassets/ index.html\n```\n\nYes! This is an HTML code coverage report that we can publish with GitLab Pages!\n\n### GitLab CI configuration\n\n\u003Ci class=\"fas fa-info-circle\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\nTake a look at [our documentation](https://docs.gitlab.com/ee/ci/yaml/)\nto learn more about how to use `.gitlab-ci.yml`.\n{: .alert .alert-info}\n\nThe GitLab [CI configuration](/solutions/continuous-integration/) can be defined in the `.gitlab-ci.yml` file. Let's go\nthrough the configuration that is necessary to publish a coverage report with\nGitLab Pages.\n\n---\n\n\u003Ci class=\"fas fa-arrow-alt-circle-right\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n**1. 
Run the RSpec test suite first**\n\nThe simplest approach is to execute all tests within a single job in the\nCI pipeline:\n\n```yaml\nimage: ruby:2.3\n\nrspec:\n  script:\n    - bundle install\n    - rspec\n```\n\n\u003Ci class=\"fas fa-arrow-alt-circle-right\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n**2. Store the result as build artifacts**\n\n```yaml\nimage: ruby:2.3\n\nrspec:\n  script:\n    - bundle install\n    - rspec\n  artifacts:\n    paths:\n      - coverage/\n```\n\nLet's see if artifacts were stored correctly using the build artifacts browser\nthat is available from the build sidebar. It is there!\n\n![code coverage report artifacts](https://about.gitlab.com/images/blogimages/publish-code-coverage-report-with-gitlab-pages/coverage-report-artifacts-browser.png)\n\n\u003Ci class=\"fas fa-arrow-alt-circle-right\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n**3. Finally, publish with GitLab Pages**\n\n\u003Ci class=\"fas fa-info-circle\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\nFollow the documentation about how to [use GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/index.html).\n{: .alert .alert-info}\n\n```yaml\nimage: ruby:2.3\n\nrspec:\n  stage: test\n  script:\n    - bundle install\n    - rspec\n  artifacts:\n    paths:\n      - coverage/\n\npages:\n  stage: deploy\n  dependencies:\n    - rspec\n  script:\n    - mv coverage/ public/\n  artifacts:\n    paths:\n      - public\n    expire_in: 30 days\n  only:\n    - master\n```\n\nA job that is meant to publish your code coverage report with GitLab Pages has\nto be placed in a separate stage. The stages `test`, `build`, and `deploy` are\nspecified by default, but you can change that if needed. Note that you also\nneed to use `pages` as the job name.\n\nUsing the `dependencies` keyword, we tell GitLab to download the artifacts stored\nas part of the `rspec` job. You also need to rename the directory from `coverage/`\nto `public/`, because this is the directory that GitLab Pages expects to find\nthe static website in.\n\nIt makes sense to deploy a new coverage report page only when the CI pipeline\nruns on the `master` branch, so we added the `only` keyword at the end of the\nconfiguration file. This will also expire artifacts after 30 days, which does\nnot affect a coverage report that has already been published.\n\n### How to run parallel tests\n\nThings get a little more complicated when you want to parallelize your test\nsuite.\n\nGitLab is capable of running test jobs in parallel, and you can use this technique\nto significantly decrease the wall-clock time needed to execute all tests and\nbuilds in the CI pipeline.\n\nNumerous approaches are available, the simplest being to split the tests manually,\nwhereas a more sophisticated one is to use tools or plugins that distribute\nthe test jobs evenly in an automated fashion.\n\nShould you decide to parallelize your test suite, you will need to generate a partial\ncode coverage report in each parallel job and store it as a build artifact.\nThen, you will need another stage in the pipeline with a job that merges the partial\ncode coverage results and generates a single report that takes all of them\n(generated during the parallel jobs) into account.\n\nAt GitLab, we parallelize our test suite heavily, and we do use additional\ntools to distribute the test jobs evenly. 
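\n\nAs a rough illustration of the idea, here is a sketch rather than our actual configuration (the two-way manual split and the `scripts/merge-coverage` helper are hypothetical names):\n\n```yaml\nimage: ruby:2.3\n\nstages:\n  - test\n  - merge\n\n# Each parallel job runs a subset of specs and stores its partial coverage\nrspec-models:\n  stage: test\n  script:\n    - bundle install\n    - rspec spec/models\n    - mv coverage/ coverage-models/\n  artifacts:\n    paths:\n      - coverage-models/\n\nrspec-lib:\n  stage: test\n  script:\n    - bundle install\n    - rspec spec/lib\n    - mv coverage/ coverage-lib/\n  artifacts:\n    paths:\n      - coverage-lib/\n\n# Merges the partial result sets into a single report in coverage/\nmerge-coverage:\n  stage: merge\n  script:\n    - bundle install\n    - scripts/merge-coverage coverage-models/ coverage-lib/\n  artifacts:\n    paths:\n      - coverage/\n```\n\n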
SimpleCov does not support merging\nresult sets out-of-the-box, so we had to [write a patch for it](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/scripts/merge-simplecov).\nThere is an issue about [contributing this change back to SimpleCov](https://gitlab.com/gitlab-org/gitlab-ce/issues/23717).\n\n### How to deploy the coverage report to GitLab Pages\n\nWhen you push your changes in `.gitlab-ci.yml` to GitLab for the first\ntime, you will see new jobs in the CI pipeline.\n\n![coverage-report-deploy-job](https://about.gitlab.com/images/blogimages/publish-code-coverage-report-with-gitlab-pages/coverage-report-pages-deploy-job.png)\n\nIf the `pages:deploy` job has been successful, the status icon for it is green.\nThis means that you can access your coverage report page using a URL like\n`http://group-path.gitlab.io/project-path`, for example\n`https://gitlab-org.gitlab.io/gitlab-ce`.\n\nThat way, a new coverage report will be published each time you push new code\nto GitLab!\n\n## How to use the code coverage report badge\n\nOnce you have the code coverage report published with GitLab Pages, you may want to\nput a link to it somewhere. We recommend using the code coverage badge that you\ncan add to your `README.md` file for that purpose.\n\nThis is how it looks in [our README.md](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/README.md).\n\n![coverage-badge-gitlab](https://about.gitlab.com/images/blogimages/publish-code-coverage-report-with-gitlab-pages/code-coverage-badge-gitlab.png)\n\nWhen someone clicks the coverage badge, the code coverage report page will be opened.\nThe Markdown source is as follows:\n\n```markdown\n[![Coverage report](https://gitlab.com/gitlab-org/gitlab-ce/badges/master/coverage.svg?job=coverage)](http://gitlab-org.gitlab.io/gitlab-ce/coverage-ruby)\n```\n\n\u003Ci class=\"fas fa-info-circle\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\nYou can find more info about report badges and the other types of badges in [our documentation](https://docs.gitlab.com/ee/ci/pipelines/settings.html#pipeline-badges).\n{: .alert .alert-info}\n\n## Summary\n\nAlthough the code coverage technique is great for revealing untested code and\nimproving overall coverage, it is not a great metric for telling how good\nthe tests are; it does, however, make it easier for people to contribute.\n\nWith GitLab, you can create simple software that is easy to contribute to!\n",{"slug":8691,"featured":6,"template":678},"publish-code-coverage-report-with-gitlab-pages","content:en-us:blog:publish-code-coverage-report-with-gitlab-pages.yml","Publish Code Coverage Report With Gitlab Pages","en-us/blog/publish-code-coverage-report-with-gitlab-pages.yml","en-us/blog/publish-code-coverage-report-with-gitlab-pages",{"_path":8697,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8698,"content":8703,"config":8707,"_id":8709,"_type":16,"title":8710,"_source":17,"_file":8711,"_stem":8712,"_extension":20},"/en-us/blog/why-we-chose-vue",{"title":8699,"description":8700,"ogTitle":8699,"ogDescription":8700,"noIndex":6,"ogImage":4861,"ogUrl":8701,"ogSiteName":692,"ogType":693,"canonicalUrls":8701,"schema":8702},"Why we chose Vue.js","Why GitLab went with Vue.js","https://about.gitlab.com/blog/why-we-chose-vue","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we chose Vue.js\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2016-10-20\",\n      
}",{"title":8699,"description":8700,"authors":8704,"heroImage":4861,"date":8705,"body":8706,"category":14},[6995],"2016-10-20","\n\nI had a great conversation with an interviewee a few weeks ago about how one\nshould go about choosing a JavaScript framework.\n\nHe pointed out that when a major software company releases their secret sauce,\nthere is going to be hype. Devs think to themselves, \"That company writes JS\ndifferently than me, and they are prominent and successful. Is their way of\nwriting JS better than mine? And therefore must I adopt it?\"\n\nTheir secret sauce may be awesome, but don't assume awesomeness just because everyone else gets excited. You wouldn't copy and paste an answer from StackOverflow, without understanding it, so why copy and paste an entire framework?\n\nWhich brings me to our decision to use [Vue.js](https://vuejs.org/) at GitLab.\n\n## Simplicity and ease of use\n\nPrimarily what drew us to Vue.js is that it allows our team to _easily_ write _simple_\nJavaScript. Getting started with Vue.js is extremely easy. Its [source code](/solutions/source-code-management/) is\nvery readable, and the documentation is the only tutorial you'll ever need. You\ndon't need external libraries. You can use it with or without jQuery. You won't\nneed to install any plugins, though many are available. I like vanilla Vue.js\npersonally, although I can reach for vue-resource when I need it. Hooking Vue.js\nup to existing code is very straightforward. There's no magic to Vue.js -- it's `Object`s\nall the way down.\n\nI talk to a lot of JavaScript devs and I find it really interesting that the ones who\nspend the most time in Angular tend to not know JavaScript nearly as well. I don't want\nthat to be me or our devs. Why should we write \"not JavaScript?\"\n\nI remember back when I was using Backbone, I had to really force myself to stay\nDRY, because it's really a blank canvas. Vue.js does not make large assumptions\nabout much of anything either. It really only assumes that your data will change.\n\nBut Vue.js comes with the perfect balance of what it will do for you and what you\nneed to do yourself. If Backbone was anarchy (no one in charge) and Angular is a\ndictatorship (the Angular team is in charge), I'd say Vue.js is like socialism: you\nare definitely in charge, but Vue.js is always within reach, a sturdy, but\nflexible safety net ready to help you keep your programming efficient and your\nDOM-inflicted suffering to a minimum.\n\nTo give you an idea of what I mean, here's a simple\n[Codepen](http://codepen.io/jschatz1/pen/dpQkpx):\n\n```html\n\u003Cdiv id=\"journal\">\n  \u003Cinput type=\"text\" v-model=\"message\">\n  \u003Cdiv>{{message}}\u003C/div>\n\u003C/div>\n```\n\n```javascript\nvar journal = new Vue({\n  el: '#journal',\n  data: {\n    message: 'Your first entry'\n  }\n});\n```\n\nIf you've seen a few JavaScript libraries, it's not hard to understand everything in\nthis example without any documentation. And usually with other frameworks, this is\nwhere the simplicity stops. You get nice, simple examples when you're \"Getting\nstarted\", but in reality things get complicated as soon as you to try to get\nyour money's worth out of the framework. Not with Vue.js though -- real-life usage\nseems to stay as simple as the docs.\n\nAnd that is what we love about Vue.js: it's an elegant combination of structure\nand simplicity. The data for the view goes in an object called `data`, but the\ndata can get there and look however you want. 
Any functions you'll write as\ncallbacks for events go into a `methods` object, but they can do or return whatever\nyou want. Vue.js just knows when things change and updates your views. And you write less code.\n\n## Vue.js + GitLab === Less code\n\nSo what problem does this solve for GitLab? When I joined, all the JavaScript was written\nwith JQuery. There is nothing _wrong_ with that, except that it takes a lot\nmore code to solve every problem. We knew we could do better. Once we started with\nVue.js, we could immediately and consistently solve complex problems in much less time.\n\nA simple, but practical example we're using in production: on a GitLab\nIssue, the issue's state is displayed as either `closed` or `open`. That\nsimple value can change often and needs to be represented in several views.\nWith JQuery, we had about 30 or so lines of code to propagate those changes, and\nthose lines involved multiple classes and querying the DOM by hand.\n\nIn Vue.js, this now requires us to write one line of JavaScript. The only other code\nwe add is in the HTML, and that's just a few additional attributes.\n\nWhat [Evan You](https://twitter.com/youyuxi) knows is that creating a kick ass framework isn't just about\nwriting great code. You need excellent documentation, a strong community helping each other\nlearn, a supporting cast of libraries and plugins to help users solve the hard problems,\nand short feedback loops based on user feedback to keep the framework relevant. Vue.js is all\nof that, *plus* great code. That's why we're using it. What about you?\n\n## Watch the Why We Chose Vue.js webcast\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/ioogrvs2Ejc\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n",{"slug":8708,"featured":6,"template":678},"why-we-chose-vue","content:en-us:blog:why-we-chose-vue.yml","Why We Chose Vue","en-us/blog/why-we-chose-vue.yml","en-us/blog/why-we-chose-vue",{"_path":8714,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8715,"content":8721,"config":8726,"_id":8728,"_type":16,"title":8729,"_source":17,"_file":8730,"_stem":8731,"_extension":20},"/en-us/blog/automated-debian-package-build-with-gitlab-ci",{"title":8716,"description":8717,"ogTitle":8716,"ogDescription":8717,"noIndex":6,"ogImage":8718,"ogUrl":8719,"ogSiteName":692,"ogType":693,"canonicalUrls":8719,"schema":8720},"Automated Debian Package Build with GitLab CI","Continuous Deployment with GitLab: how to build and deploy a Debian Package with GitLab CI","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684154/Blog/Hero%20Images/adfinis-sygroup-cover.png","https://about.gitlab.com/blog/automated-debian-package-build-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automated Debian Package Build with GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Adfinis SyGroup\"}],\n        \"datePublished\": \"2016-10-12\",\n      }",{"title":8716,"description":8717,"authors":8722,"heroImage":8718,"date":8724,"body":8725,"category":14},[8723],"Adfinis SyGroup","2016-10-12","\n\nThis post is a customer story by [Adfinis SyGroup][adf].\n{:.note}\n\nWe've decided to use [GitLab CI][ci] to build Debian packages\nautomatically. 
GitLab CI allows users to execute tasks based\non definable events, such as Git tags.\n\nWe've created a generic Docker container which contains the base\npackage-building tools and is used by GitLab to build the package.\nUpdates can be easily installed in the build environment, since the\nDocker container can be simply replaced with a new one.\n\nThe following shows the automated packaging of the [GoAccess] log\nanalysis tool. Many tools are not packaged in their latest version\nand thus the packages have to be created manually.\n\n\u003C!-- more -->\n\n## Prepare the Debian Package\n\nFirst, the files which control the building of the Debian package\nare created. In the case of GoAccess, these are:\n\n```shell\ndebian/changelog # Changes to the package and software\ndebian/compat    # Compatibility level for debhelper\ndebian/control   # Package-specific information such as dependencies and description\ndebian/rules     # Instructions for debhelper\n```\n\nThe Debian project already offers [detailed documentation][debian-doc]\nto simplify the introduction to packaging.\n\n## Prepare the Docker Container\n\nOn a host system, a container must be prepared in which a package can\nthen be built. Start by creating a `Dockerfile`:\n\n```dockerfile\nFROM debian:wheezy\nADD  setup.sh /opt/\nRUN  /bin/bash /opt/setup.sh\n```\n\nThe `Dockerfile` ([official documentation][dockerfile-doc]) indicates\nwhich base image is to be used. In this case, it's Debian Wheezy. After\nthat, the `setup.sh` script is copied into the `/opt/` directory of the container. \nIn `setup.sh`, the mirror which is going to be used is configured, and\nthe most basic dependencies, which can be used in any build, are installed:\n\n\n```shell\n#!/bin/sh\n\n# change to our own mirror\necho \"deb http://pkg.adfinis-sygroup.ch/debian/ wheezy main non-free contrib\" > /etc/apt/sources.list\necho \"deb http://security.debian.org/ wheezy/updates main\" >> /etc/apt/sources.list\necho \"deb http://pkg.adfinis-sygroup.ch/debian/ wheezy-updates main contrib non-free\" >> /etc/apt/sources.list\n\n# requirements\napt-get update\napt-get -y install git dh-make build-essential autoconf autotools-dev\n```\n\nAs soon as these files have been prepared, we can build the Docker container\n(note the trailing `.`, which passes the current directory as the build context):\n\n```shell\ndocker build -t generic-package-build-runner:v1 .\n```\n\nThe container is now created and ready for use.\n\n## Configure GitLab CI\n\nNow, the prepared Docker container has to be [registered for the\ncurrent project][ci-docker-registry], in which a package is to be built:\n\n```shell\ngitlab-ci-multi-runner register \\\n--non-interactive \\\n--url \"$GITLAB_URL\" \\\n--registration-token \"$CI_TOKEN\" \\\n--description \"Generic debian wheezy package build runner\" \\\n--executor \"docker\" \\\n--docker-image \"generic-package-build-runner:v1\"\n```\n\nThe GitLab URL and the CI token can be found in the GitLab\nproject on the page **Settings** > **Runners**. 
Each project has its own CI token.\n\nIn order for GitLab CI to know which commands in the container\nshould be executed, [the file `.gitlab-ci.yml`][ci-doc] is created within the repository.\n\n```yaml\n# Is performed before the scripts in the stages step\nbefore_script:  \n  - source /etc/profile\n\n# Defines stages which are to be executed\nstages:  \n  - build\n\n# Stage \"build\"\nrun-build:  \n  stage: build\n  script:\n    - apt-get install -y libncurses5-dev libglib2.0-dev libgeoip-dev libtokyocabinet-dev zlib1g-dev libncursesw5-dev libbz2-dev\n    - autoreconf -fvi\n    - cp COPYING debian/copyright\n    - dpkg-buildpackage -us -uc\n    - mkdir build\n    - mv ../goaccess*.deb build/\n\n  # This stage is only executed for new tags\n  only:\n    - tags\n\n  # The files which are to be made available in GitLab\n  artifacts:\n    paths:\n      - build/*\n```\n\nThe most important part of this file is the `run-build` job.\nThis part defines which actions are executed, when they are\nexecuted, and the locations of the files created in the build.\n\nSince a generic Docker container was created, the necessary\ndependencies have to be installed in the first step.\n\nAfter that, the building procedure is prepared with `autoreconf`.\nAmong other things, this results in the creation of the Makefile,\nwhich is indispensable for the build. Since we're using the copyright\nfrom the upstream package, we copy `COPYING` to `debian/copyright`.\n\nThe building process is then started with the command `dpkg-buildpackage`.\nThe package is compiled and the Debian package is created. The packages\nare then moved to the newly created `build` directory and uploaded to GitLab.\n\n## Workflow\n\nAs soon as we have a new release, a Git tag is created. This Git tag\nstarts a new build in GitLab, which builds the package using the latest version.\nThe package that is created will then be made available in the web\ninterface of GitLab, where it can be downloaded. \n\n![Build Debian Package with GitLab CI](https://about.gitlab.com/images/blogimages/automated-debian-package-build-with-gitlab-ci/gitlab-ci-build.png){:.shadow}\n\n## Outlook\n\nIdeally, the packages or artifacts built should be processed automatically,\nfor example by uploading them to a mirror. In our case, we're using a\nbot which, when instructed by a [GitLab Webhook][webhooks-doc], downloads the artifacts\nonto a target server, adds them to an Aptly repository, and publishes the\nrepository, such that the process can be [fully automated from package\nbuild to publication][gitlab-cd]. 
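\n\nAs a rough sketch of what such a bot can do (hedged: the project ID, tag, token, and repository names are placeholders, and we show today's v4 jobs-artifacts API endpoint rather than whatever the bot actually calls):\n\n```shell\n#!/bin/sh\n# Triggered by the webhook: fetch the artifacts of the tagged build\ncurl --header \"PRIVATE-TOKEN: ${API_TOKEN}\" -o artifacts.zip \\\n  \"https://gitlab.example.org/api/v4/projects/${PROJECT_ID}/jobs/artifacts/${TAG}/download?job=run-build\"\nunzip artifacts.zip   # yields build/*.deb, as defined in .gitlab-ci.yml\n\n# Add the packages to the local Aptly repo and re-publish it\naptly repo add wheezy-main build/*.deb\naptly publish update wheezy\n```\n\n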
The final result can be freely viewed on our Aptly mirror.\n\n",{"slug":8727,"featured":6,"template":678},"automated-debian-package-build-with-gitlab-ci","content:en-us:blog:automated-debian-package-build-with-gitlab-ci.yml","Automated Debian Package Build With Gitlab Ci","en-us/blog/automated-debian-package-build-with-gitlab-ci.yml","en-us/blog/automated-debian-package-build-with-gitlab-ci",{"_path":8733,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8734,"content":8740,"config":8744,"_id":8746,"_type":16,"title":8747,"_source":17,"_file":8748,"_stem":8749,"_extension":20},"/en-us/blog/wrapping-text",{"title":8735,"description":8736,"ogTitle":8735,"ogDescription":8736,"noIndex":6,"ogImage":8737,"ogUrl":8738,"ogSiteName":692,"ogType":693,"canonicalUrls":8738,"schema":8739},"To Wrap or Not to Wrap: One of Life's Greatest Questions","Breaking lines: useful or painful?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683906/Blog/Hero%20Images/wrap-not-to-wrap-text.png","https://about.gitlab.com/blog/wrapping-text","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"To Wrap or Not to Wrap: One of Life's Greatest Questions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2016-10-11\",\n      }",{"title":8735,"description":8736,"authors":8741,"heroImage":8737,"date":8742,"body":8743,"category":14},[8399],"2016-10-11","\n\nWhether and how we should be making line breaks is currently a\ncontroversial topic amongst the GitLab folks;\nit's an issue of style as well as ease of use.\nIn this post, we’re presenting the two current views held, and \ngiving you the opportunity to speak your mind\n(in the comments or [on Twitter][twitter]) about how we should\nhandle this in our [style guide][style-guide].\n \nThe current policy laid out in the style guide is that you should:\n\n> _Split up long lines, this makes it much easier to review and edit.\nOnly double line breaks are shown as a full line break in\n[GitLab markdown][markdown]. 80-100 characters is a good line length._\n\n\u003C!-- more -->\n\nThe understanding is that if you create a line break after\n80 characters, the text becomes easier to review.\n\nHowever, quite a few people feel that this isn't helpful\nand creates stylistic and/or visual messiness when,\nfor example, edits are made which cause the line breaks to go amok.\nBasically, there are two options if your line exceeds 80 characters:\n\n1. Wrap the text as the author of the code.\n1. Rely on the reviewer to wrap the text with their\nlocal editor, and insert a new line for every sentence.\n\n## DON'T WRAP IT.\n\nSome of us at GitLab are in the don’t-wrap camp, positing\nthat it’s better to rely on the reviewer. If you modify text,\nyou don’t want to have to realign the rest of the text when\nedits are made because the line breaks are uneven. Rewrapping\nrequires a feature that some editors (including GitLab)\ndon’t have. Some people might have it but not know they\nhave it, or not know where to find it in their editor.\nIf you do rewrap with your editor, the diff might get messy\nand make it hard to work out what's visually changed.\nPlus, that 80- to 100-character limit is more of a guess\nthan an exact limit. It can be different depending on your\neditor/screen size.\n\nThis is how a paragraph looks after a few reviews when\nyou choose to wrap the text. 
It's odd, as it has long lines\nand short lines distributed unevenly:\n\n![wrapped text - uneven view](https://about.gitlab.com/images/blogimages/wrapping-text/wrapped-text-after-reviews.png){:.shadow}\n\nHowever, when you don't wrap it, a collaborator needs to\npoint to the portion of the text before explaining the change,\nwhich can be very time-consuming. For example, in the paragraph\nbelow, the collaborator needs to identify the link first, and\nrepeat the section with the proposed changes:\n\n![change link - non-wrapped text](https://about.gitlab.com/images/blogimages/wrapping-text/unwrapped-text-worse-to-review.png){:.shadow}\n\n## DO WRAP IT.\n\nThe company's official position is of the _do-wrap variety_,\nwith the main reason being that when people view and edit the text,\nthey can do so without scrolling horizontally _ad infinitum_,\nwhich drives some people nuts. As a result, the comment \nbox sometimes extends beyond the edge of the viewport, making\nit difficult to use any of the buttons on the right-hand side.\n\nCompared with the last example, in the following screenshot\nof text that is already wrapped, it's easy to comment inline directly,\nwithout having to identify what you're talking about first\nand comment afterwards.\n\n![change link - wrapped text](https://about.gitlab.com/images/blogimages/wrapping-text/wrapped-text-easier-to-review.png){:.shadow}\n\n## Comparing Similar Views\n\nWhen editing a file in the GitLab UI, this is how wrapped text\nlooks:\n\n![wrapped text, GitLab UI view](https://about.gitlab.com/images/blogimages/wrapping-text/gitlab-ui-wrapped-text.png){:.shadow}\n\nAnd this is how unwrapped text looks:\n\n![non-wrapped text, GitLab UI view](https://about.gitlab.com/images/blogimages/wrapping-text/gitlab-ui-non-wrapped-text.png){:.shadow}\n\nEven when you're reviewing locally, check how wrapped\ntext looks in Sublime:\n\n![Sublime text view - wrapped](https://about.gitlab.com/images/blogimages/wrapping-text/wrapped-text-easier-to-read.png){:.shadow}\n\nAnd how it looks when unwrapped:\n\n![Sublime text view - non-wrapped](https://about.gitlab.com/images/blogimages/wrapping-text/unwrapped-text-scroll-horizontally.png){:.shadow}\n\nClearly, wrapped text can be considered better for both reading\nand reviewing through inline comments. But it can also be annoying\nwhen writing and editing the file.\n\n## Going forward\n\nPersonally, while I'm writing, I don't wrap the text; I\ndo that just when it's ready for review. This way, I save myself\nsome time when writing and editing, but I still leave it wrapped\nto make it easier for the reviewers to add inline comments in my\nmerge request.\n\nBut yes, there are pros and cons for both cases, of course. The\nquestion is, what can we do to make it less painful for everyone?\n\nOne of the possible actions we could take going forward is to set\na max-width both on the comment box container and on the editor,\nso that it remains in a usable state, regardless of line length.\n\nOther ideas? We’re open to them. Let us know what you think, and\nhow you do that with your team.\n\n**Note**: Currently, for technical articles on the blog, we follow the\nStyle Guide, but the [Blog Post Style Guide][blog-style-guide] is\nan override or addendum to that. The consensus has been that wrapped\ntext facilitates review. 
We have a lot of guest writers and many\nreviewers, and this seems to be their preference, so we’ve tried\nto honor that.\n{:.note}\n\n\u003C!-- identifiers -->\n\n[markdown]: https://docs.gitlab.com/ee/user/markdown.html\n[sid-twitter]: https://twitter.com/sytses\n[style-guide]: https://docs.gitlab.com/ee/development/documentation/styleguide/#text\n[twitter]: https://twitter.com/gitlab\n[blog-style-guide]: /handbook/marketing/blog/#styles-guidelines\n",{"slug":8745,"featured":6,"template":678},"wrapping-text","content:en-us:blog:wrapping-text.yml","Wrapping Text","en-us/blog/wrapping-text.yml","en-us/blog/wrapping-text",{"_path":8751,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8752,"content":8758,"config":8762,"_id":8764,"_type":16,"title":8765,"_source":17,"_file":8766,"_stem":8767,"_extension":20},"/en-us/blog/infrastructure-update",{"title":8753,"description":8754,"ogTitle":8753,"ogDescription":8754,"noIndex":6,"ogImage":8755,"ogUrl":8756,"ogSiteName":692,"ogType":693,"canonicalUrls":8756,"schema":8757},"GitLab Infrastructure Update","Hear how we're working through infrastructure challenges as we scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683895/Blog/Hero%20Images/infrastructure.jpg","https://about.gitlab.com/blog/infrastructure-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Infrastructure Update\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pablo Carranza\"}],\n        \"datePublished\": \"2016-09-26\",\n      }",{"title":8753,"description":8754,"authors":8759,"heroImage":8755,"date":8760,"body":8761,"category":14},[8670],"2016-09-26","\n\n\nAs Infrastructure Lead, my job is to make [GitLab.com][gitlab] fast and highly available. \n\nLately, it's been a challenge. Why? We are hitting our threshold where scale starts to matter. For example, over 2,000 new repos\nare being created during peak hours, and CI runners are requesting new builds 3,000,000 times per hour.\nIt's an interesting problem to have. We have to store this information somewhere and make sure that \nwhile we're gaining data and users, GitLab.com keeps working fine. \n\nA large part of the issue we're running into as we scale is that there is little or no documentation \non how to tackle this kind of problem. While there are companies that have written high-level posts, almost none of them\nhave shared **how** they arrived at their solutions.\n\nOne of our main issues in the past six months has been around storage. We built a CephFS cluster to tackle both the capacity and\nperformance issues of using NFS appliances. Another more recent issue is around PostgreSQL vacuuming and how it affects performance locking up the database\ngiven the right kind of load. \n\nAs [outlined in our values][values], we believe we have a \nresponsibility to document this so other companies know what to do when they reach this point.\nLast Thursday, I gave a GitLab.com infrastructure status report during our [daily team call][team-call]. \nWatch the recording or download the slides to see how we're working through our challenges with scaling. 
\n\n\u003C!-- more -->\n\n## Recording & Slides\n\n### Recording \n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/kN-HcObb9zo\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\n\u003Cbr>\n\n### Slides\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://docs.google.com/presentation/d/11rCsJM41WAETPWqtWgfIxgfPRBQB4m037aZpgsGpzkk/embed?start=false&loop=false&delayms=5000\" frameborder=\"0\" width=\"1280\" height=\"749\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\n\u003Cbr>\n\n\u003C!-- identifiers --> \n[gitlab]: https://gitlab.com/\n[team-call]: /handbook/communication/#team-call\n[values]: https://handbook.gitlab.com/handbook/values/\n\n",{"slug":8763,"featured":6,"template":678},"infrastructure-update","content:en-us:blog:infrastructure-update.yml","Infrastructure Update","en-us/blog/infrastructure-update.yml","en-us/blog/infrastructure-update",{"_path":8769,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8770,"content":8776,"config":8781,"_id":8783,"_type":16,"title":8784,"_source":17,"_file":8785,"_stem":8786,"_extension":20},"/en-us/blog/posting-to-your-gitlab-pages-blog-from-ios",{"title":8771,"description":8772,"ogTitle":8771,"ogDescription":8772,"noIndex":6,"ogImage":8773,"ogUrl":8774,"ogSiteName":692,"ogType":693,"canonicalUrls":8774,"schema":8775},"Posting to your GitLab Pages blog from iOS","Tutorial: Learn how to post to your GitLab Pages blog from anywhere, using your iOS device.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684176/Blog/Hero%20Images/ios-writing-anywhere.png","https://about.gitlab.com/blog/posting-to-your-gitlab-pages-blog-from-ios","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Posting to your GitLab Pages blog from iOS\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Angelo Stavrow\"}],\n        \"datePublished\": \"2016-08-19\",\n      }",{"title":8771,"description":8772,"authors":8777,"heroImage":8773,"date":8779,"body":8780,"category":14},[8778],"Angelo Stavrow","2016-08-19","\n\nSpeed and stability under load are [advantages of using a static-site generator (SSG)][1],\nbut because building the site and uploading the generated HTML is done on a computer,\nare you stuck hauling around your laptop to post to your SSG-powered blog?\nNope! In this post, we'll show you how to harness the power of [GitLab Pages][2]\nto update your site from your iOS device anywhere, anytime.\n\n\u003C!-- more -->\n\n## Requirements\n\nYou'll need an SSG-powered site set up on GitLab.com, of course (they're free _and_\n[easy to set up][3]!), but you'll also need an app for performing [Git][Git] operations\nand for text editing on your iOS device.\n\nIn this tutorial, we'll post to a [Hugo][4]-based site (though it'll work with\n[any SSG that you use with GitLab Pages][pageshowto]) using [Working Copy][5] to\nhandle Git operations and [Editorial][6] for text editing. GitLab provides\n[a template][hugotemplate] that you can use to set up your own Hugo-powered SSG.\n\nNote that these apps aren't free. Working Copy requires an in-app purchase to\nenable pushing to a remote repository, and Editorial costs a small amount. 
Of course,\nthere are other options available (you can explore some of them on the\n[GitLab Community Applications page][7]), but these are the apps we'll be using\nto describe the process in this tutorial.\n\n## Concept\n\nGitLab Pages uses [GitLab CI][ci] to automate [building and deploying][cd] your SSG-powered\nwebsite when you push a commit to a certain branch (e.g., `master`). When GitLab CI\nsees a new commit on this branch, it triggers the [GitLab Runner][gitlabrunner] to\nexecute the scripts in the [`.gitlab-ci.yml`][gitlabciyml] file you created when\nyou set up your SSG for GitLab Pages. There's a [great set of templates][templates]\nfor setting up your SSG of choice, including sample `.gitlab-ci.yml` files.\n\nSince the process needs some (pretty straightforward) Git management, we need an\napp to handle committing and pushing to GitLab. It's also a good idea to\ncheck out a new branch while you're working on your edits! Imagine you're drafting\na new article on your iPhone, then push it to `master` on GitLab so that you can\npick up where you left off on another device (say, your computer). GitLab CI will\npick up the commit, and publish a half-finished post automatically! By working on\na separate branch, you don't have to worry about that happening.\n\n## The details\n\n### Cloning your site's repository\n\nStart by logging in to your GitLab account in Working Copy and cloning your website's\nrepository to your iOS device by following the instructions in the app's [user guide][8].\n\n### Creating a new branch\n\nNext, navigate to your site's repository and create a new branch. In Working Copy,\nthis is done by tapping the **Repository** cell, then the **Branch** cell, and then\nthe top-rightmost button (create new branch) in the navigation bar. Give your branch\na name (for example, something like `post_title-of-article_yyyy-mm-dd` might be\nuseful), and then tap on **Done**. Tap the **\u003C** (back) button on the top-left of\nthe navigation bar twice to go back to your repository.\n\n![Creating a new branch in Working Copy](https://about.gitlab.com/images/blogimages/posting-to-your-gitlab-pages-blog-from-ios/wc-add-new-branch-annotated.png){: .shadow}\n\n### Creating a new file for the post\n\nNow that you're on a new branch, navigate within your repository to the folder where\nposts go. In Hugo's default [setup][hugoquickstart], this is `/content/post`&mdash;\nnavigating here, you should see all of your existing posts listed. To add a new\nfile, tap the **&#43;** button in the top-right of the navigation bar, and from\nthe sheet that pops up, tap **Create text file**. Give the file a name (e.g.,\n`title-of-article.md`). If you like, tap on the newly-created file to view details,\nthen go back.\n\n![Creating a new file in Working Copy](https://about.gitlab.com/images/blogimages/posting-to-your-gitlab-pages-blog-from-ios/wc-add-new-file-annotated.png){: .shadow}\n\n### Opening the file for editing\n\nOf course, since the file is empty, you need to edit it to draft your post. Tap on\nthe file in the table view to display the file contents (currently empty), then\ntap on the share icon in the upper-right corner of the navigation bar, and in the\nshare sheet that pops up, tap **Edit** to begin editing the file in Working Copy, or&mdash;if you have it installed&mdash;tap **Edit in Editorial** to open the\nfile in Editorial for writing. 
The first time you do so, Working Copy will let you\nknow what's going to happen, and invite you to install the \"Working Copy\" workflow\nin Editorial. This will let you send the file back to Working Copy, ready for committing.\n\n![Opening the file for editing in Editorial](https://about.gitlab.com/images/blogimages/posting-to-your-gitlab-pages-blog-from-ios/wc-open-file-for-editing-annotated.png){: .shadow}\n\n### Adding required front matter\n\nHugo (and most other SSGs) require each post to have some [front matter][hugofrontmatter],\nincluding a date, a title, and so on. One nice option with Editorial is that it\ncan natively expand [TextExpander][9] shortcuts _without_ having to switch to\nalternate keyboards. You can create a template for your front matter and, upon\nopening the file in Editorial, type the shortcut (e.g., `;toml` or `;yaml`),\nand&mdash;💥 poof💥&mdash;the shortcut will be expanded and ready for whatever\nyou need to enter.\n\n### Writing the post\n\nYou're now ready to type your article! Go ahead and type to your heart's content.\nYou can swipe to the left from the edge of the screen in Editorial to show a Markdown\npreview of what you've written so far, in case you want to preview the post. Keep\nin mind that your SSG may not be using the same Markdown rendering engine as\nEditorial does, so if you're using non-standard Markdown elements, the final post\nmay not look exactly the same.\n\n![Sending the file back to Working Copy from Editorial](https://about.gitlab.com/images/blogimages/posting-to-your-gitlab-pages-blog-from-ios/wc-editorial-workflow-annotated.png){: .shadow}\n\nWhen you're at a point where you want to save and commit your progress, tap the 🔧\nicon in the top-right of the navigation bar in Editorial, then tap the **Working\nCopy** workflow, and you'll be taken back to Working Copy, ready to commit. Enter\na commit message and tap **Commit** in the navigation bar to commit your changes.\n\n### Adding images to your post\n\nWant to add an image to your post? You can save a photo to your repository in Working\nCopy by navigating to the right folder (in Hugo, images should be saved in the\n`static` folder; other SSGs will vary), tapping the **&#43;** button, and tapping\non **Import image** in the pop-up sheet. Select the images you want to add from\nyour photos (you may have to give Working Copy permission to access the library\nfirst), and it'll be added to the repository. You then just have to reference them\nappropriately in your Markdown file.\n\n### Committing your changes and pushing the content back to GitLab\n\nOnce you're ready to commit, tap the **Repository** field in Working Copy's repository\nnavigator, then tap on the **Commit changes** button. You'll be prompted to enter\na short summary, as well as an (optional) detailed explanation of the changes. Below\nthe text fields, you'll see a list of files to commit (i.e., the text file you added\nfor your post, and any images you uploaded in Working Copy).\n\n![Commit and push from Working Copy to GitLab](https://about.gitlab.com/images/blogimages/posting-to-your-gitlab-pages-blog-from-ios/wc-commit-and-push-annotated.png){: .shadow}\n\nOnce you've entered a commit message, the **Commit** button will be enabled in the\ntop-right of the navigation bar. Next to it is a **&#43;Push** toggle; if it's\nhighlighted in blue, tapping on the commit button will commit the changes and push\nthem to GitLab; otherwise, the commit will only take place on your iOS device. 
Tap\nthe **&#43;Push** button to toggle this behavior. This may be useful if you want\nto make multiple commits while you're working without a network connection, for\nexample, then pushing them all at once to GitLab once you're connected again.\n\n### Merging the post branch into master to trigger CI and publish\n\nSo you've written your post, added some images, and pushed the changes to GitLab.\nWant to publish from your iPhone? You can easily do so from GitLab! Launch\nSafari and log in to GitLab, and create a new Merge Request as you normally\nwould to merge the changes in your `post_title-of-article_yyyy-mm-dd` branch to\n`master`. Accept the merge and GitLab CI will pick up the changes, execute the\nrequired scripts, and publish the changes to your site!\n\n## Final thoughts\n\nThe workflow described in this article might be for you if:\n\n- you're the type to be struck by inspiration, and want to be able to draft something\nquickly, or\n- you're often away from your computer, or\n- you want to blog about your trip while you're travelling, or\n- you simply prefer to use iOS devices in place of computers,\n\nIf any of these situations apply to you, this is a very convenient way to use a\n SSG for its benefits, without giving up your ability to work from anywhere.\n\nHowever, there are some caveats to consider. For one, you can't render your site\nlocally on your iPhone to preview what your post (or other changes, for that matter)\nwill look like when it goes live. If you're still in the process of tweaking things,\nor  you haven't fully explored your SSG's Markdown rendering engine, that can be\na bit troublesome&mdash;for example, you may only find out after the post goes\nlive that it will correctly render an HTML entity by code (e.g., `&#43;`) but not\nby description (e.g., `&plus;`). Oops. Of course, you can always sync your working\nbranch back to your computer and preview it there, if you really need to.\n\nAdditionally, if you're using your iPhone, the screen and virtual keyboard size\nmay be uncomfortable for typing longer posts. You can certainly use an external\nBluetooth keyboard, but this may not be practical, and it doesn't change the size\nof your phone's screen!\n\nFinally, while Working Copy and Editorial are both excellent, professional-level\napps, they may not be in your budget. Fortunately, you can always do everything\noutlined here via the GitLab website (creating new branches and files) as long as\nyou have an internet connection; if you want to continue writing while offline,\nyou can always copy and paste into one of [many text editors for iOS][itexteditors],\nseveral of which also feature Markdown previewing.\n\nWhatever method you choose, it's comforting to know that GitLab has you covered\nshould you want to post to your SSG-powered blog from your iOS device. What are\nyour preferred mobile git clients and text editors? Tell us in the comments!\n\n## About the author\n\n[Angelo](http://angelostavrow.com) is a Quality Engineer and Software Developer\nliving in Montreal, Canada. 
He believes that open, honest collaboration is the\nbest path towards building great things _and_ great teams.\n\n\u003C!-- cover image: https://unsplash.com/photos/hkN2Zde2ga4 -->\n\n[1]: /blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static/\n[2]: https://pages.gitlab.io/\n[3]: /blog/gitlab-pages-setup/\n[4]: http://gohugo.io\n[5]: http://workingcopyapp.com/\n[6]: http://omz-software.com/editorial/\n[7]: /partners/\n[8]: http://workingcopyapp.com/manual.html#cloning-repos\n[9]: https://textexpander.com/\n[Git]: https://git-scm.com/\n[ci]: /solutions/continuous-integration/\n[cd]: /blog/continuous-integration-delivery-and-deployment-with-gitlab/\n[templates]: https://gitlab.com/groups/pages\n[gitlabrunner]: http://doc.gitlab.com/ee/ci/quick_start/README.html#shared-runners\n[gitlabciyml]: /blog/gitlab-pages-setup/#gitlab-ci\n[pageshowto]: /blog/ssg-overview-gitlab-pages-part-3-examples-ci/\n[hugoquickstart]: http://gohugo.io/overview/quickstart/\n[hugofrontmatter]: https://gohugo.io/content/front-matter/\n[hugotemplate]: https://gitlab.com/pages/hugo\n[itexteditors]: http://brettterpstra.com/ios-text-editors/\n",{"slug":8782,"featured":6,"template":678},"posting-to-your-gitlab-pages-blog-from-ios","content:en-us:blog:posting-to-your-gitlab-pages-blog-from-ios.yml","Posting To Your Gitlab Pages Blog From Ios","en-us/blog/posting-to-your-gitlab-pages-blog-from-ios.yml","en-us/blog/posting-to-your-gitlab-pages-blog-from-ios",{"_path":8788,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8789,"content":8795,"config":8800,"_id":8802,"_type":16,"title":8803,"_source":17,"_file":8804,"_stem":8805,"_extension":20},"/en-us/blog/building-an-elixir-release-into-docker-image-using-gitlab-ci-part-1",{"title":8790,"description":8791,"ogTitle":8790,"ogDescription":8791,"noIndex":6,"ogImage":8792,"ogUrl":8793,"ogSiteName":692,"ogType":693,"canonicalUrls":8793,"schema":8794},"Building an Elixir Release into a Docker image using GitLab CI - Part 1","Deploying projects written in Elixir/Erlang to production with Docker Containers and GitLab CI!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665223/Blog/Hero%20Images/containers.jpg","https://about.gitlab.com/blog/building-an-elixir-release-into-docker-image-using-gitlab-ci-part-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building an Elixir Release into a Docker image using GitLab CI - Part 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Alexander Malaev\"}],\n        \"datePublished\": \"2016-08-11\",\n      }",{"title":8790,"description":8791,"authors":8796,"heroImage":8792,"date":8798,"body":8799,"category":14},[8797],"Alexander Malaev","2016-08-11","\n\n**Note:** this post is a customer story by Alexander Malaev, a software developer.\n{: .note}\n\nWe are actively using Phoenix/Elixir in our projects for backend development, and we also have a RoR project as a frontend service for our Admin UI. Our project consists of a bunch of microservices written in Elixir/Erlang, and we are running it in production with Docker containers linked together and composed by Docker Compose.\n\nOn every push to a project's branch on [GitLab], [GitLab CI] runs tests, style checking, and other tasks. These tasks are configured using `.gitlab-ci.yml`. On every merge to `master` GitLab builds a release image for us and uploads it to [GitLab Container Registry][registry]. 
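\n\nOn the servers, each entry in `docker-compose.yml` simply points at that registry image. A minimal sketch (the registry address reuses the example host from later in this post; the `restart` policy is an assumption):\n\n```yaml\n# docker-compose.yml (sketch)\nproject:\n  image: gitlab.example.org:4567/example/project:latest\n  restart: always\n```\n\n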
After that, we run `docker-compose pull && docker-compose up -d` on the servers to download the latest release images and upgrade our containers.\n\n\u003C!-- more -->\n\n## CI pipeline\n\nIn the following, I will describe our release pipeline for Elixir services, using snippets from our project’s `.gitlab-ci.yml`.\n\nWe are using the `docker:latest` image for our Runner, and several stages:\n\n```yaml\nimage: docker:latest\nstages:\n  - build\n  - styles\n  - test\n  - release\n  - cleanup\n```\n\nWe pass some variables:\n\n```yaml\nvariables:\n  APP_NAME: project\n  APP_VERSION: 0.0.1\n  CONTAINER_RELEASE_IMAGE: gitlab.example.org/example/project:latest\n  POSTGRES_HOST: postgres\n  POSTGRES_USER: postgres\n  POSTGRES_PASSWORD: password\n```\n\nThese variables are used during the release's build, so they will be available in all the stages. E.g., `CONTAINER_RELEASE_IMAGE` is used in the release stage as the address to push the release image to. The `POSTGRES_*` variables are used to configure the postgres service, and to connect to it later from containers.\n\nOur build stage:\n\n```yaml\nbuild:\n  before_script:\n    - docker build -f Dockerfile.build -t ci-project-build-$CI_PROJECT_ID:$CI_BUILD_REF .\n    - docker create\n      -v /build/deps\n      -v /build/_build\n      -v /build/rel\n      -v /root/.cache/rebar3/\n      --name build_data_$CI_PROJECT_ID_$CI_BUILD_REF busybox /bin/true\n  tags:\n    - docker\n  stage: build\n  script:\n    - docker run --volumes-from build_data_$CI_PROJECT_ID_$CI_BUILD_REF --rm -t ci-project-build-$CI_PROJECT_ID:$CI_BUILD_REF\n```\n\nBefore running this stage, we create a container which provides volumes for build artifacts. By the way, GitLab CI has a cache volume of its own for similar purposes, but I couldn’t make it work correctly with GitLab Runner using the Docker image.\n\n```yaml\ntest:\n  services:\n    - postgres\n  tags:\n    - docker\n  stage: test\n  script:\n    - env\n    - docker run --rm\n      --link $POSTGRES_NAME:postgres\n      -e POSTGRES_HOST=$POSTGRES_HOST\n      -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD\n      -e POSTGRES_USER=$POSTGRES_USER\n      -e MIX_ENV=$MIX_ENV\n      --volumes-from build_data_$CI_PROJECT_ID_$CI_BUILD_REF ci-project-build-$CI_PROJECT_ID:$CI_BUILD_REF sh -c \"mix ecto.setup && mix test\"\n```\n\nNotice that we must pass the variables and link postgres manually, since GitLab Runner passes the variables only to the first level of Docker, but we go a level deeper ;)\n\nWe could link as many services as we want. 
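\n\nDeclaring an extra service is just one more entry in the job's `services` list. A minimal sketch (the `spotify/kafka` image name is an illustrative assumption, not our actual setup):\n\n```yaml\ntest:\n  services:\n    - postgres\n    - spotify/kafka   # hypothetical second service image\n```\n\n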
For example, we are using Kafka in production, and in our test stage we make a Kafka service available for running the tests.\n\nStyle checking:\n\n```yaml\nstyles:\n  tags:\n    - docker\n  stage: styles\n  script:\n    - docker run --rm\n      --volumes-from build_data_$CI_PROJECT_ID_$CI_BUILD_REF ci-project-build-$CI_PROJECT_ID:$CI_BUILD_REF sh -c \"mix credo --strict\"\n```\n\nThe release task; we run it only on pushes to `master`:\n\n```yaml\nrelease:\n  tags:\n    - docker\n  stage: release\n  script:\n    - docker run\n      --volumes-from build_data_$CI_PROJECT_ID_$CI_BUILD_REF\n      -e MIX_ENV=prod --rm -t ci-project-build-$CI_PROJECT_ID:$CI_BUILD_REF\n      sh -c \"mix deps.get && mix compile && mix release\"\n    - docker cp build_data_$CI_PROJECT_ID_$CI_BUILD_REF:/build/rel/$APP_NAME/releases/$APP_VERSION/$APP_NAME.tar.gz .\n    - docker build -t $CONTAINER_RELEASE_IMAGE .\n    - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN gitlab.example.org:4567\n    - docker push $CONTAINER_RELEASE_IMAGE\n  only:\n    - master\n```\n\nWe are using Conform to achieve runtime configuration of the release using environment variables. I use the approach described in this [blog post][post-env].\n\nA task to clean things up:\n\n```yaml\ncleanup_job:\n  tags:\n    - docker\n  stage: cleanup\n  script:\n    - docker rm -v build_data_$CI_PROJECT_ID_$CI_BUILD_REF\n    - docker rmi ci-project-build-$CI_PROJECT_ID:$CI_BUILD_REF\n  when: always\n```\n\nIt removes the container with the volumes created for build artifacts, and removes the image used during the pipeline. This task runs every time, regardless of the results of any previous tasks.\n\nBelow are our Dockerfiles:\n\n`Dockerfile.build`:\n\n```dockerfile\nFROM msaraiva/elixir-gcc\nRUN apk add postgresql-client erlang-xmerl erlang-tools --no-cache\nWORKDIR /build\nADD . 
/build\nCMD mix deps.get\n```\n\nThis image is used to create a container for running tests and style checks.\n\n`Dockerfile`:\n\n```dockerfile\nFROM alpine:edge\nRUN apk --update add postgresql-client erlang erlang-sasl erlang-crypto erlang-syntax-tools && rm -rf /var/cache/apk/*\nENV APP_NAME project\nENV PORT 4000\nRUN mkdir -p /app\nCOPY $APP_NAME.tar.gz /app/\nWORKDIR /app\nRUN tar -zxvf $APP_NAME.tar.gz\nEXPOSE $PORT\nCMD trap exit TERM; /app/bin/$APP_NAME foreground & wait\n```\n\nThis Dockerfile is used to build the actual image with the Elixir release.\n\n## Existing problems\n\n- We don’t yet use the \"Erlang hot upgrade\" feature;\n- We don’t test whether the release starts correctly; for now, we test it manually and locally;\n- Every container uses its own \"epmd\", and intercommunication between the services is currently done over REST APIs; I’m working on integrating the [Erlang-In-Docker approach][approach] to use native Erlang messaging between services.\n\n## What’s next?\n\nI plan to write and publish several articles about our release pipeline, to answer the following questions:\n\n- How do we compile and publish assets?\n- How do we run our database migrations, since mix tasks aren’t available from the release image?\n- What problems did we face during the implementation of this pipeline, and what solutions did we find?\n\nThanks for reading!\n\n",{"slug":8801,"featured":6,"template":678},"building-an-elixir-release-into-docker-image-using-gitlab-ci-part-1","content:en-us:blog:building-an-elixir-release-into-docker-image-using-gitlab-ci-part-1.yml","Building An Elixir Release Into Docker Image Using Gitlab Ci Part 1","en-us/blog/building-an-elixir-release-into-docker-image-using-gitlab-ci-part-1.yml","en-us/blog/building-an-elixir-release-into-docker-image-using-gitlab-ci-part-1",{"_path":8807,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8808,"content":8812,"config":8816,"_id":8818,"_type":16,"title":8819,"_source":17,"_file":8820,"_stem":8821,"_extension":20},"/en-us/blog/continuous-integration-delivery-and-deployment-with-gitlab",{"title":8809,"ogTitle":8809,"noIndex":6,"ogImage":8052,"ogUrl":8810,"ogSiteName":692,"ogType":693,"canonicalUrls":8810,"schema":8811},"Continuous Integration, Delivery, and Deployment with GitLab","https://about.gitlab.com/blog/continuous-integration-delivery-and-deployment-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Continuous Integration, Delivery, and Deployment with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2016-08-05\",\n      }",{"title":8809,"authors":8813,"heroImage":8052,"date":8814,"body":8815,"category":14},[8399],"2016-08-05","Can you imagine having [Continuous Integration, Continuous Delivery, and Continuous Deployment](/topics/ci-cd/)\nwithin the same web interface? With **GitLab**, you can!\n\nAfter a brief introduction to these topics,\nand a short walkthrough of some use cases for these development practices, we'll present you\nwith a **video** illustrating the capability of going **from idea to production faster** with\nGitLab. 
Check how you can easily deploy your app automatically from GitLab to Docker Cloud.\n\n\u003C!-- more -->\n\n## Continuous Integration\n\n**[Continuous Integration][ci]** is a software development practice in which you **build and test** software\nevery time a developer pushes code to the repository, which happens several times a day.\n\nContinuous Integration: TEST - BUILD\n{: .alert .alert-warning .yellow}\n\nFor example, our developers push code to [GitLab CE][ce-repo]\nand [GitLab EE][ee-repo] every day, multiple times per day.\nFor every commit, we use [GitLab CI] to **test and build** our software. We run unit tests to make sure\nsome change didn't break other parts of the software. [Every push triggers multiple tests][ce-pipes],\nmaking it easier to identify where the error is when a test happens to fail.\nBut we **do not deploy to production often**, making both GitLab CE and EE cases\nof **Continuous Integration** only.\n\n## Continuous Delivery\n\n**[Continuous Delivery][cd]** is a software engineering approach in which **continuous integration**, **automated\ntesting**, and **automated deployment** capabilities allow software to be developed and [deployed rapidly],\nreliably, and repeatedly with minimal human intervention. Still, the **deployment to production** is defined strategically\nand **triggered manually**.\n\nContinuous Delivery: TEST - BUILD - \u003Ci class=\"far fa-hand-pointer\" aria-hidden=\"true\" style=\"color: rgb(252,109,38) !important;\">\u003C/i> - DEPLOY\n{: .alert .alert-warning .yellow}\n\n[Mozilla Firefox][moz] and [Envato] are good examples of Continuous Delivery. They both get their product\n**deployed to production** as soon as it's ready, with as little human intervention as possible.\n\n## Continuous Deployment\n\n**[Continuous Deployment][cdp]** is a software development practice in which every code change goes through\nthe entire pipeline and is put **into production automatically**, resulting in many production\ndeployments every day. It does everything that Continuous Delivery does, but the process is fully automated;\nthere's **no human intervention at all**.\n\nContinuous Deployment: TEST - BUILD - \u003Ci class=\"fas fa-cogs\" aria-hidden=\"true\" style=\"color: rgb(252,109,38) !important\">\u003C/i> - DEPLOY\n{: .alert .alert-warning .yellow}\n\nFor example, our website, [about.GitLab.com], is **continuously deployed**. We commit multiple times a day to\nfeature-branches, and every push triggers a [parallel][doc-stages] **test and build**. Every time we merge to the\n`master` branch (and we do that a lot, every day), the code is tested, built, and **deployed to\nthe production** [environment][env], passing through the entire [pipeline][com-pipe].\nThere's **no further manual action** that triggers the deployment: it is an automated process, controlled by GitLab CI.\n\n## Challenges\n\n[Perforce performed a study][perforce] that revealed that most of the companies surveyed are using Continuous\nDelivery methods to ship their products:\n\n> _The [study] indicates that Continuous Delivery has really taken off: 65% say their companies have migrated at\nleast one project/team to Continuous Delivery practices._\n>\n_80% of SaaS companies are doing Continuous Delivery, compared to 51% of non-SaaS companies (like boxed or on-premise software, embedded systems or hardware, industrial goods, etc.)_\n>\n_Nearly everyone agrees on the vital role of the collaboration platform (version management, build automation, code review, etc.) 
in achieving Continuous Delivery. 96% said it’s important and 40% said it’s critical. No argument here._\n{: .justify}\n\nAnd they raised an interesting question:\n\nWhat’s the hardest thing about **Continuous Delivery**?\n{: .alert .alert-info}\n\nThe answer was:\n\nFor non-SaaS companies, it’s getting **automation technologies to integrate**.\n{: .alert .alert-success}\n\nWell, with GitLab, you have all of this, **fully-integrated into one single UI**. From [GitLab 8.10] on,\nyou can [perform Manual Actions][manual] and manually deploy your application with the click of a button,\nmaking Continuous Delivery easier than ever. Take a look.\n\nYou can manually **deploy** to staging:\n\n![Continuous Delivery - deploy to staging]{: .shadow}\n\nYou can also manually **deploy** to production:\n\n![Continuous Delivery - deploy to production]{: .shadow}\n\nAnd you are free to **roll back** to the previous state with the click of a button:\n\n![Continuous Delivery - rollback]{: .shadow}\n\n## From idea to production with GitLab\n\nOur Head of Product, [Mark Pundsack], created a demonstration that illustrates our built-in capabilities\nwith **GitLab CI**, **Continuous Deployment**, and **[Container Registry]** together, to develop **faster\nfrom idea to production**.\n\nIn his video, you can see how it's possible, within one single interface (GitLab), to do everything:\n\n- \u003Ci class=\"fas fa-info-circle fa-fw\" aria-hidden=\"true\">\u003C/i> Have an idea\n- \u003Ci class=\"fas fa-exclamation-circle fa-fw\" aria-hidden=\"true\">\u003C/i> Create an issue to discuss it with your team\n- \u003Ci class=\"fas fa-code fa-fw\" aria-hidden=\"true\">\u003C/i> Ship the code within a merge request\n- \u003Ci class=\"fas fa-terminal fa-fw\" aria-hidden=\"true\">\u003C/i> Run automated scripts (sequential or parallel)\n   - Build, test **and deploy** to a **staging environment**\n   - Preview the changes\n- \u003Ci class=\"far fa-edit fa-fw\" aria-hidden=\"true\">\u003C/i> Review the code and get it approved\n- \u003Ci class=\"fas fa-code-branch fa-fw\" aria-hidden=\"true\">\u003C/i> Merge the feature-branch into `master`\n   - **Deploy** your changes **automatically** to a **production environment**\n- \u003Ci class=\"fas fa-undo fa-fw\" aria-hidden=\"true\">\u003C/i> Roll back if something goes wrong\n{: .list-icons}\n\nThe most amazing thing is that you can track the entire process. Everything is\nfully-integrated with GitLab already; you don't need any other tools to deliver your software, nor to jump\nbetween different applications and interfaces to track the process.\n\nThe full spectrum is clearly visible: the issue, the commits to the merge request, the reviews, the builds, the tests,\nthe deploys, the deployment history, the [container history], the environments and the [pipelines][mark-pipes].\n\nFurthermore, for this example demo configuration, every time you push code to the repository, even if it's\nto feature-branches, the pipeline runs **from build to deployment**. But instead of deploying to production,\nthese branches deploy to a staging environment, as the sketch below illustrates. 
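\n\nA minimal `.gitlab-ci.yml` sketch of that branching behavior (job names and the deploy script are illustrative assumptions, not Mark's actual configuration):\n\n```yaml\ndeploy_staging:\n  stage: deploy\n  script: ./deploy.sh staging     # hypothetical deploy script\n  environment: staging\n  except:\n    - master\n\ndeploy_production:\n  stage: deploy\n  script: ./deploy.sh production\n  environment: production\n  only:\n    - master\n```\n\n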
Production is only affected by the `master` branch.\n\nFor this particular case, Mark used [Docker Cloud] to deploy his app, but you are free to use your creativity to\noptimize your software development process with GitLab and its built-in development tools.\n\nCheck it out; it's awesome!\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pY4IbEXxxGY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n**Note:** we assume you know what Docker is, how to use it, and how to deploy an app to [Docker Cloud].\n{: .note}\n\n## Conclusion\n\nThe terms Continuous **Delivery** and Continuous **Deployment** are confusing, but hopefully you now\nunderstand the difference between them. The goal is pushing code frequently, and having it tested,\nbuilt, and deployed. If you prefer having a human decision before deploying to production, GitLab\nallows you to do that with [Manual Actions][manual]. If you want a fully-automated process, with GitLab you\ncan do that too. Whatever strategy your company chooses, GitLab does the job, and does it well!\n\nOur development team works hard to offer the best of modern software development tools and techniques. We ship a new\nversion once a month, every 22nd, with more features and improvements, to make development faster and better.\n\nGitLab is unique: we go [from idea to production][direction] using one single interface that integrates all the tools we need!\n\nFollow [@GitLab] on Twitter and stay tuned for updates!",{"slug":8817,"featured":6,"template":678},"continuous-integration-delivery-and-deployment-with-gitlab","content:en-us:blog:continuous-integration-delivery-and-deployment-with-gitlab.yml","Continuous Integration Delivery And Deployment With Gitlab","en-us/blog/continuous-integration-delivery-and-deployment-with-gitlab.yml","en-us/blog/continuous-integration-delivery-and-deployment-with-gitlab",{"_path":8823,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8824,"content":8830,"config":8835,"_id":8837,"_type":16,"title":8838,"_source":17,"_file":8839,"_stem":8840,"_extension":20},"/en-us/blog/building-our-web-app-on-gitlab-ci",{"title":8825,"description":8826,"ogTitle":8825,"ogDescription":8826,"noIndex":6,"ogImage":8827,"ogUrl":8828,"ogSiteName":692,"ogType":693,"canonicalUrls":8828,"schema":8829},"Building our web-app on GitLab CI","5 reasons why Captain Train migrated from Jenkins to GitLab CI","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684187/Blog/Hero%20Images/building-our-web-app-on-gitlab-ci-cover.jpg","https://about.gitlab.com/blog/building-our-web-app-on-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building our web-app on GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pierre de La Morinerie\"}],\n        \"datePublished\": \"2016-07-22\",\n      }",{"title":8825,"description":8826,"authors":8831,"heroImage":8827,"date":8833,"body":8834,"category":14},[8832],"Pierre de La Morinerie","2016-07-22","\n\nThe railway world is a fast-moving environment. To bring you the latest improvements and fixes as quickly as possible, Captain Train’s web-app is often updated, sometimes several times per day.\n\nHave you ever wondered how we manage to build and deploy all of this without a jolt? 
Then read on: here is a technical peek into our engineering process.\n\n**Note:** this post tells the customer story of [Captain Train][cap].\n{: .note}\n\n\u003C!-- more -->\n\n## From Jenkins to GitLab CI\n\nWe used to build our web-app using [Jenkins]: a robust and proven solution, which polled our repositories every minute and built the appropriate integration and production branches.\n\nHowever, we recently switched to a new system for building our web-app. To host our source-code and perform merge-requests, we’re using a self-managed instance of [GitLab]. It’s nice, open-source—and features an integrated build system: [GitLab CI].\n\nSee it like Travis, but integrated: just add a custom `.gitlab-ci.yml` file at the root of your repository, and GitLab will automatically start building your app in the way you specified.\n\nNow what’s cool about this?\n\n## Reliable dockerized builds\n\nJenkins builds were all executed on a resource-constrained server—and this made builds slow and unreliable. For instance, we repeatedly observed PhantomJS crashing randomly during tests: apparently it didn’t like several builds running on the same machine at the same time—and a single PhantomJS process crashing would bring all of the others down.\n\nSo the first step of our migration was to isolate builds in Docker containers. In this way:\n\n- **Every build is isolated from the others**, and processes don’t crash each other randomly.\n- **Building the same project on different architectures is easy**, and that’s good news, because we need this to support multiple Debian versions.\n- Project maintainers have **greater control over the setup of their build environment**: no need to bother an admin when upgrading an SDK on the shared build machine.\n\n## It scales\n\nGitLab CI allows us to add more runners very easily. And now that builds are performed in Docker containers, we don’t have to configure the runners specifically with our build tools: any out-of-the-box server will do.\n\nOnce a new runner is declared, **scaling is automatic**: the most available runner will be picked to start every new build. It’s so simple that you can even add your own machine to build locally.\n\nWe’ve already reduced our build time by switching to a more powerful runner—a migration that would have been more difficult to do using Jenkins. Although we regularly optimize the run time of our test suite, sometimes you also need to just throw more CPU at it.\n\n## Easier to control\n\nWith Jenkins, the configuration of the build job is stored in an external admin-restricted tool. You need the right credentials to edit the build configuration, and it’s not obvious how to do it.\n\nUsing GitLab CI, the build jobs are determined solely from the `.gitlab-ci.yml` file in the repository. This makes it really simple to edit, and you get all the niceties of your usual git work-flow: versioning, merge requests, and so on. You don’t need to ask permission to add CI to your project. Lowering the barrier to entry for CI is definitely a good thing for engineering quality and developer happiness.\n\n## Tests on merge requests\n\nGitLab CI makes it really easy to build and test the branch of a **merge request** (or a _“Pull request”_ in GitHub slang). 
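\n\nFor illustration, a minimal job of this kind (the image name and test command are assumptions; our actual configuration is more involved):\n\n```yaml\ntest:\n  image: our-build-image:latest   # hypothetical Docker build image\n  script:\n    - ./run-tests.sh              # hypothetical test entry point\n```\n\n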
Just a few lines like these added to our `.gitlab-ci.yml` file, and we were running tests for every push to a merge request.\n\n![Merge automatically when the build succeeds][merge]{: .shadow}\n\nWe get a nice red-or-green status, the quite useful _“Merge automatically when the build succeeds”_ button — and, as branches are now tested before being merged, much less build breakage.\n\n![Build Passed][build]{: .shadow}\n\n## A slick UI\n\nGitLab CI provides _“Pipelines”_, an overview of all your build jobs. This points you quickly to a failing build, and the stage where the problem occurs. Plus it gets you this warm and fuzzy feeling of safety when everything is green.\n\n![Pipelines]{: .shadow}\n\n## In a nutshell\n\nWe found the overall experience quite positive. Once past the initial hurdle of making the build pass in a Docker container, integrating it into GitLab CI was really easy. And it gave us tons of positive signals, new features and neat integrations. 10/10, would build again.👍\n\nOur Android team also migrated their pipeline, and is now building the integration and production Android APK with GitLab CI.\n\nFor further reading, you can find on the official website a nice [overview of GitLab CI features][GitLab CI], and some [examples of `.gitlab-ci.yml` files][CI examples].\n\n_This post was originally [published by Captain Train][cap-post]._\n\n_[Captain Train][cap], the European train ticketing company, makes buying train tickets faster, easier, and ad-free. Their goal is to revolutionize the purchase of train tickets, and they're doing it by engineering the best user-experience._\n{: .note}\n\n\u003C!-- identifiers -->\n\n[build]: /images/blogimages/cross-post-gitlab-ci/build-passed.png\n[cap]: https://www.captaintrain.com\n[cap-post]: https://blog.captaintrain.com/12703-building-on-gitlab-ci\n[Jenkins]: https://jenkins.io/\n[GitLab]: https://about.gitlab.com/\n[GitLab CI]: /solutions/continuous-integration/\n[CI examples]: https://docs.gitlab.com/ee/ci/quick_start/\n[merge]: /images/blogimages/cross-post-gitlab-ci/merge-when-build-succeeds.png\n[pipelines]: /images/blogimages/cross-post-gitlab-ci/pipelines.png\n",{"slug":8836,"featured":6,"template":678},"building-our-web-app-on-gitlab-ci","content:en-us:blog:building-our-web-app-on-gitlab-ci.yml","Building Our Web App On Gitlab Ci","en-us/blog/building-our-web-app-on-gitlab-ci.yml","en-us/blog/building-our-web-app-on-gitlab-ci",{"_path":8842,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8843,"content":8849,"config":8853,"_id":8855,"_type":16,"title":8856,"_source":17,"_file":8857,"_stem":8858,"_extension":20},"/en-us/blog/markdown-kramdown-tips-and-tricks",{"title":8844,"description":8845,"ogTitle":8844,"ogDescription":8845,"noIndex":6,"ogImage":8846,"ogUrl":8847,"ogSiteName":692,"ogType":693,"canonicalUrls":8847,"schema":8848},"Markdown Kramdown Tips and Tricks","Learn how to apply classes to markdown, create ToCs, embed iframes and much more!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671850/Blog/Hero%20Images/markdown-kramdown-tips-and-tricks-cover.png","https://about.gitlab.com/blog/markdown-kramdown-tips-and-tricks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Markdown Kramdown Tips and Tricks\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2016-07-19\",\n      }",{"title":8844,"description":8845,"authors":8850,"heroImage":8846,"date":8851,"body":8852,"category":14},[8399],"2016-07-19","\nIf 
you use a markdown engine for writing your website content and you'd like to\nlearn a few tricks to have more freedom with it, this post is for you.\n\nThe markdown engine we use for [about.GitLab.com] is [Kramdown], and that is the one\nwe'll be referring to in this post.\n\n**Note:** We assume you already know what a markdown engine is and how it is applied to a website.\n{: .note}\n\n\u003C!-- more -->\n\n----\n\n## On this post\n{: .no_toc}\n\n- TOC\n{:toc}\n\n----\n\n## Our Markdown Guide\n\nLast week a lot of [people were happy][news] about our [Handbook] being open source, as we explained\nin detail in the post \"[Our Handbook is open source: here's why][handbook-post]\".\nEvery GitLab team member touches our website, starting in their first weeks,\nas part of the onboarding tasks.\nIt doesn't matter whether they are an advanced programmer or have never seen HTML code before:\ncollaborating is the key to making sure we are all on the same page. And we love it!\n\nOne of our Handbook pages is a full [Markdown Guide][guide] for the markup\nthat we use in our website, generated by [Middleman].\nIt goes into a lot of detail on how to use Kramdown for writing content.\nEvery markdown page of this website, which is an [open\nsource project][www-GitLab-com] available for peeking and contributing, can use any\nof the rules explained there. This April we changed the markdown engine from RDiscount to\nKramdown, and not everybody on our team knew about the new \"magical\" stuff this change made available. That's\nwhy we decided that writing a guide would be useful for those already used to markdown, and\nhelpful for those completely new to it.\n\n## Why Kramdown\n\nPerhaps your first question will be something like \"okay, why is Kramdown so special?\". My first\nexperience with markdown was when I first used a [Static Site Generator][SSGs], Jekyll. Coming from\nprevious experiences in web development with PHP and HTML, the first thing I wanted to do to a\nmarkdown post was to add a class to a particular heading. When I googled for that, I was pretty\ndisappointed because apparently we aren't supposed to apply classes inline in markdown files.\nSo, I had to experiment a lot until I got the desired result: adding some color to my heading.\n\nAfter trying a lot of new tweaks, and digging through the web for answers that insisted on not coming, I finally\nfound out that with Kramdown, yes, I could do a lot of things. And finally I could apply some inline classes\nthroughout my posts and have my blue headings when I wanted them blue. But at that time, I hadn't noticed\nthat we could do some really great magic with it, and that's what I'm sharing with you in this post.\n\n## The magic\n\nWe could say that the Kramdown magic boils down to the following syntax: `{: something}`.\nThis little devil is the basis of a lot of awesome resources.\n\nLet's go over a few of them now, but you'll find a lot more in our [Markdown Guide][guide].\n\n## Classes, IDs and Attributes\n\n{::options parse_block_html=\"true\" /}\n\nLet's start with something classic: the ability of [applying CSS classes, custom IDs, and custom\nattributes][classes] to the elements.\n\n### Applying classes\n\nIf you think of any CSS class, what comes to mind first? I suppose it's something like:\n\n```css\n.blue {\n  color: blue;\n}\n```\n\nOkay, we have a `.blue` class. Let's say once in a while we want a blue paragraph or a blue heading. 
Just do:\n\n```md\nThis is a paragraph that for some reason we want blue.\n{: .blue}\n```\n\nAnd of course, the output will be:\n\n\u003Cdiv class=\"panel panel-info\">\n**Output**\n{: .panel-heading}\n\u003Cdiv class=\"panel-body\">\nThis is a paragraph that for some reason we want blue.\n{: .blue}\n\u003C/div>\n\u003C/div>\n\nAnd if we want a blue heading, we do exactly the same thing:\n\n```md\n#### A blue heading\n{: .blue}\n```\n\nAnd the output is going to behave as we expect it to:\n\n\u003Cdiv class=\"panel panel-info\">\n**Output**\n{: .panel-heading style=\"margin-bottom:10px\"}\n\u003Cdiv class=\"panel-body\">\n#### A blue heading\n{: .blue .no_toc}\n\u003C/div>\n\u003C/div>\n\nWhat if I want to apply two classes at the same time?\n\n```md\nA blue and bold paragraph.\n{: .blue .bold}\n```\n\nAnd the output will be as expected:\n\n\u003Cdiv class=\"panel panel-info\">\n**Output**\n{: .panel-heading style=\"margin-bottom:10px\"}\n\u003Cdiv class=\"panel-body\">\nA blue and bold paragraph.\n{: .blue .bold}\n\u003C/div>\n\u003C/div>\n\nAs simple as that! The markup is simple and intuitive.\n\nNow, guess what, we can do exactly the same for IDs!\n\n### Custom IDs\n\nKramdown itself will automatically assign an ID to each heading. The ID will be all the words\nin the heading together, lowercased and connected by dashes. For the example above, \"A blue heading\", the HTML output ID\nwill be `a-blue-heading`:\n\n```html\n\u003Ch4 class=\"blue\" id=\"a-blue-heading\">A blue heading\u003C/h4>\n```\n\nLet's say we want the ID called `blue-h`:\n\n```md\n#### A blue heading\n{: .blue #blue-h}\n```\n\nThis will produce exactly what it's meant to (a blue heading with the custom ID):\n\n```html\n\u003Ch4 class=\"blue\" id=\"blue-h\">A blue heading\u003C/h4>\n```\n\nSo, the output would be:\n\n\u003Cdiv class=\"panel panel-info\">\n**Output**\n{: .panel-heading style=\"margin-bottom:10px\"}\n\u003Cdiv class=\"panel-body\">\n#### A blue heading\n{: .blue .no_toc #blue-h}\n\u003C/div>\n\u003C/div>\n\nNote that we can assign both a class and an ID in one markup, as in `{: .class #custom-id}`. But we can use\njust one of them too: `{: .class}` or `{: #custom-id}`.\n{: .alert .alert-warning}\n\nInteresting, isn't it?\n\n### Custom Attributes\n\nYes, we can go even further and apply any _key/value_ pair we need:\n\n```md\nAn example of key/value pair\n{: .class #id key=\"value\"}\n```\n\nWe can use them, for example, for quickly applying general styles:\n\n```\n#### A simple example\n{: #custom-id style=\"margin-top:0\"}\n```\n\nBut they are especially useful for links, as in:\n\n```md\n[text][identifier]{: #custom-id key=\"value\"}\n```\n\nThis way we can call a JavaScript function, for example:\n\n```md\n[CLICK ME][identifier]{: #custom-id onclick=\"myJsFunc();\"}\n\n\u003Cscript type=\"text/javascript\">\n  function myJsFunc() {\n  var answer = confirm (\"Please click on OK to continue.\")\n  if (answer)\n  window.location=\"#\";\n  }\n\u003C/script>\n```\n\n\u003Cdiv class=\"panel panel-info\">\n**Output**\n{: .panel-heading}\n\u003Cdiv class=\"panel-body\">\n[CLICK ME][identifier]{: #custom-id onclick=\"myJsFunc();\"}\n\n\u003Cscript type=\"text/javascript\">\nfunction myJsFunc() {\nvar answer = confirm (\"Please click on OK to continue.\")\nif (answer)\nwindow.location=\"#\";\n}\n\u003C/script>\n\u003C/div>\n\u003C/div>\n\n----\n\n## Table of Contents (ToC)\n\nA ToC is so awesome and easy to produce. Have you noticed [our ToC](#on-this-post)\nin this post? 
It's generated automatically by Kramdown with this simple markup:\n\n```md\n- TOC\n{:toc}\n```\n\nAll the file's headings will be automatically included in the ToC, except for those we don't want there.\nFor these, we apply a class called `no_toc`, and Kramdown will respect our will:\n\n```md\n#### This heading will not be included in the ToC.\n{: .no_toc}\n```\n\nAnd of course, we can make the ToC an ordered list instead of unordered:\n\n```md\n1. TOC\n{:toc}\n```\n\nAwesome, isn't it?\n\n----\n\n## HTML Blocks\n\nWhenever we need HTML blocks, we can use them freely!\n\n```html\n\u003Cdiv>\n  \u003Cp>Hello World\u003C/p>\n\u003C/div>\n```\n\nIn our [Marketing Handbook] you will find plenty of them.\n\n### Font Awesome\n\n[Font Awesome] is a good use-case for HTML blocks within markdown files.\n\nCheck this!\n\n```html\nWe \u003Ci class=\"fas fa-heart\" aria-hidden=\"true\" style=\"color:#c7254e\">\u003C/i> GitLab!\n```\n\n\u003Cdiv class=\"panel panel-info\">\n**Output**\n{: .panel-heading style=\"margin-bottom:10px\"}\n\u003Cdiv class=\"panel-body\">\nWe \u003Ci class=\"fas fa-heart\" aria-hidden=\"true\" style=\"color:#c7254e\">\u003C/i> GitLab!\n\u003C/div>\n\u003C/div>\n\n### Iframes\n\nWe can embed anything within `\u003Ciframe>` tags, such as [YouTube and Vimeo videos][videos],\nGoogle and OneDrive [documents][docs], and anything else available in iframes:\n\n```html\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/NoFLJLJ7abE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n```\n\nWe are using the class `video_container` to make it [responsive].\n\n\u003Cdiv class=\"panel panel-info\">\n**Output**\n{: .panel-heading style=\"margin-bottom:10px\"}\n\u003Cdiv class=\"panel-body\">\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/NoFLJLJ7abE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C/div>\n\u003C/div>\n\n### CodePen\n\n[CodePens] are really good for some cases, for example when you want to display code and results. Check this cute dog,\ncreated with HTML and Sass:\n\n```html\n\u003Cp data-height=\"431\" data-theme-id=\"dark\" data-slug-hash=\"OXzjLL\" data-default-tab=\"html,result\" data-user=\"virtuacreative\" data-embed-version=\"2\" class=\"codepen\">See the Pen \u003Ca href=\"http://codepen.io/virtuacreative/pen/OXzjLL/\">Dog\u003C/a> by Virtua Creative (\u003Ca href=\"http://codepen.io/virtuacreative\">@virtuacreative\u003C/a>) on \u003Ca href=\"http://codepen.io\">CodePen\u003C/a>.\u003C/p>\n\u003Cscript async src=\"//assets.codepen.io/assets/embed/ei.js\">\u003C/script>\n```\n\n\u003Cp data-height=\"431\" data-theme-id=\"dark\" data-slug-hash=\"OXzjLL\" data-default-tab=\"html,result\" data-user=\"virtuacreative\" data-embed-version=\"2\" class=\"codepen\">See the Pen \u003Ca href=\"http://codepen.io/virtuacreative/pen/OXzjLL/\">Dog\u003C/a> by Virtua Creative (\u003Ca href=\"http://codepen.io/virtuacreative\">@virtuacreative\u003C/a>) on \u003Ca href=\"http://codepen.io\">CodePen\u003C/a>.\u003C/p>\n\u003Cscript async src=\"//assets.codepen.io/assets/embed/ei.js\">\u003C/script>\n\n----\n\n## Mix HTML with Markdown\n\nYes, we definitely can do this! 
We need to add the following markup to the markdown document before mixing\nHTML and markdown:\n\n```md\n{::options parse_block_html=\"true\" /}\n```\n\nAnd we can close it any time, if necessary:\n\n```md\n{::options parse_block_html=\"false\" /}\n```\n\nThis is going to make this:\n\n```html\nSomething in **markdown**.\n\n\u003Cp>Then an HTML tag with crazy **markup**!\u003C/p>\n```\n\nTo be displayed like this:\n\n\u003Cdiv class=\"panel panel-info\">\n**Output**\n{: .panel-heading style=\"margin-bottom:10px\"}\n\u003Cdiv class=\"panel-body\">\nSomething in **markdown**.\n\u003Cp>Then an HTML tag with crazy **markup**!\u003C/p>\n\u003C/div>\n\u003C/div>\n\nThe blue boxes used to [display the outputs][boxes] in this post, like the one above, were generated with this resource.\n\n----\n\n## Styles\n\nOne of the most useful features is the ability to add `\u003Cstyle>` tags to our markdown file too!\nWe can do that to style a single web page without affecting the entire site. Just go on and add the\ntag to any part of your markdown:\n\n```html\n\u003Cstyle>\n.blue {\n  color: blue;\n}\n.bold {\n  font-weight: bold;\n}\n\u003C/style>\n```\n\nThis tag was applied to this very document to exemplify this case, and also to provide the classes described\n[earlier in this post](#applying-classes).\n\n----\n\n## Conclusion\n\nThere is a lot more you can do, mix, and bring together using [Kramdown]. It's awesome! Check out\nour [Markdown Guide][guide] for more resources, examples and applications, and use your creativity to create\nbeautiful posts, with great styles!\n\nAnything else you know of that is not in our [Guide]? Any new magic?\nAny trick? Please [contribute] by submitting an [MR] to the\n[source file]. Your collaboration is much appreciated.\n\nHappy markdowning!\n\nFollow [@GitLab] and stay tuned for the next post!\n\n\u003C!-- identifiers -->\n\n[about.GitLab.com]: /\n[@GitLab]: https://twitter.com/GitLab\n[boxes]: https://handbook.gitlab.com/docs/markdown-guide/\n[classes]: https://handbook.gitlab.com/docs/markdown-guide/\n[CodePens]: https://codepen.io\n[contribute]: https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/CONTRIBUTING.md\n[docs]: https://handbook.gitlab.com/docs/markdown-guide/#embed-documents\n[font awesome]: http://fontawesome.io/\n[guide]: https://handbook.gitlab.com/docs/markdown-guide/\n[Handbook]: https://handbook.gitlab.com/\n[handbook-post]: /blog/our-handbook-is-open-source-heres-why/\n[identifier]: #\n[Kramdown]: http://kramdown.gettalong.org/\n[Marketing Handbook]: https://handbook.gitlab.com/handbook/marketing/\n[Middleman]: https://middlemanapp.com/\n[mr]: https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html \"Merge Request\"\n[news]: https://news.ycombinator.com/item?id=12091638\n[responsive]: https://css-tricks.com/NetMag/FluidWidthVideo/Article-FluidWidthVideo.php\n[SSGs]: /blog/ssg-overview-gitlab-pages-part-2/\n[videos]: https://handbook.gitlab.com/docs/markdown-guide/#videos\n[www-GitLab-com]: https://gitlab.com/gitlab-com/www-gitlab-com\n[source file]: https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/source/handbook/markdown-guide/index.html.md\n\n\u003Cstyle>\n.blue {\n  color: blue !important;\n}\n.bold {\n  font-weight: bold;\n}\n\u003C/style>\n",{"slug":8854,"featured":6,"template":678},"markdown-kramdown-tips-and-tricks","content:en-us:blog:markdown-kramdown-tips-and-tricks.yml","Markdown Kramdown Tips And 
Tricks","en-us/blog/markdown-kramdown-tips-and-tricks.yml","en-us/blog/markdown-kramdown-tips-and-tricks",{"_path":8860,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8861,"content":8867,"config":8872,"_id":8874,"_type":16,"title":8875,"_source":17,"_file":8876,"_stem":8877,"_extension":20},"/en-us/blog/how-to-setup-a-gitlab-instance-on-microsoft-azure",{"title":8862,"description":8863,"ogTitle":8862,"ogDescription":8863,"noIndex":6,"ogImage":8864,"ogUrl":8865,"ogSiteName":692,"ogType":693,"canonicalUrls":8865,"schema":8866},"How to Set Up a GitLab Instance on Microsoft Azure","Learn how to set up a GitLab Instance on Microsoft Azure with this tutorial","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672257/Blog/Hero%20Images/gitlab-on-azure-cover.jpg","https://about.gitlab.com/blog/how-to-setup-a-gitlab-instance-on-microsoft-azure","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to Set Up a GitLab Instance on Microsoft Azure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dave Wentzel\"}],\n        \"datePublished\": \"2016-07-13\",\n      }",{"title":8862,"description":8863,"authors":8868,"heroImage":8864,"date":8870,"body":8871,"category":14},[8869],"Dave Wentzel","2016-07-13","\n\n> Note: This article has been moved to [our documentation](https://docs.gitlab.com/ee/install/azure/index.html) so it can be more easily updated. We recommend using [the Azure installation documentation](https://docs.gitlab.com/ee/install/azure/index.html).\n\nGitLab is a scalable, self-managed Git repository \"ecosystem\". It is available as\na free [Community Edition][ce] and as a subscription-based\n[Enterprise Edition][ee]. If you want to host your own full-featured source\ncontrol system, under your control, then you should consider GitLab. Spinning up\nyour own instance can be done in just a few hours using the\n[Omnibus packages](/blog/using-omnibus-gitlab-to-ship-gitlab/).\n\nBut what if you don't want to invest that much time to see if GitLab is\nfor you? Does Linux scare you? Do you want to try GitLab quickly without\na big up-front investment? Need someone else to handle your GitLab\nadministration? [Microsoft Azure] may be the answer.\n\n**Note:** we assume you are familiar with GitLab and you wish to have your own\nGitLab instance on-premises, working in a Virtual Machine.\n{: .note}\n\n\u003C!-- more -->\n\n----\n\n### What's in this tutorial?\n{: .no_toc}\n\n- TOC\n{:toc}\n\n----\n\n## GitLab on Azure\n\nAzure is Microsoft's business cloud and GitLab is a pre-configured\noffering on the Azure Marketplace. Hopefully you aren't surprised to\nhear that [Microsoft and Azure have embraced][ms-open]\nopen source software like Ubuntu, Red Hat Enterprise Linux, and GitLab.\nYou can now spin up a pre-configured GitLab VM in just a few clicks.\nLet's get started.\n\n## Getting started\n\nFirst you need an account on Azure. There are three ways to do this:\n\n- If your company (or you) already has an account then you are ready to go!\n- You can [open an Azure account for free][free-trial]. You get credits you can\n  use to try out paid Azure services, and even after you've used them you can\n  still keep the account and use the free Azure services. Your credit card won't\n  be charged, unless you decide to pay-as-you-go. 
This is a great way to try out\n  Azure and cloud computing.\n- If you have an MSDN subscription you can [activate your Azure subscriber benefits][msdn-benefits].\n  Your MSDN subscription gives you recurring Azure credits every month. Why not\n  put those credits to use and try out GitLab!\n\n## Working with Azure\n\nNow that you have an account, we can get started. When you log in to\nAzure using [portal.azure.com], you will see\nthe Dashboard, which gives you a quick overview of Azure resources:\n\n![Microsoft Azure Dashboard](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-welcome-screen.png)\n\nFrom the Dashboard you can build VMs, create SQL Databases, author\nwebsites, and perform lots of other cloud tasks. Today we want to try\nGitLab, which is part of the [Azure Marketplace][marketplace]. The\nMarketplace is an online store for pre-configured applications and\nservices optimized for the cloud by software vendors like GitLab. Click\non the **+ New** icon and in the search box type \"GitLab\":\n\n![Search for GitLab on Azure Marketplace](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-dashboard-search-gitlab.png)\n\n## Create new VM\n\nAzure Marketplace offerings are always changing, but let's click \"**GitLab\nCommunity Edition**\". [GitLab CE][ce] is freely available under the MIT Expat\nLicense. A new \"blade\" window will pop out, where you can read about the\noffering.\n\n![Select GitLab CE on Azure Marketplace](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-dashboard-select-gitlab.png)\n\nClick \"**Create**\" and you will be presented with the \"Create virtual machine\"\nblade.\n\n### Basics\n\nThe first things we need to configure are the basic settings of the underlying\nUbuntu 14.04.4 VM. In the screenshot below, I set the hostname to \"GitLab-CE\" and I\nchose Password authentication to keep things simple. This is the password that we will use later to SSH into the VM, so make sure it's a strong password/passphrase.\nAlternatively, you can choose to paste your SSH public key so that you don't have to type\nyour password every time. A \"Resource group\" is a way to group related resources\ntogether for easier administration. I named mine \"GitLab-CE-Azure\", but your\nresource group can have the same name as your VM. Click OK when ready.\n\n![GitLab on Azure - Basic settings](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-create-vm-basics.png)\n\n### Size\n\nThe next screen reviews the Pricing Tiers, which are the VM sizes. I\nchose a \"**D1 Standard**\" VM, which meets the minimum system requirements to\nrun a small GitLab environment. When ready, click 'Select'.\n\n\u003Ci class=\"fas fa-info-circle\" aria-hidden=\"true\" style=\"color: rgb(49, 112, 143);\">\u003C/i>\nBy default, only the recommended tiers are shown. To choose a larger one, click\non 'View all'.\n{: .alert .alert-info}\n\n![Choose a VM size for GitLab on Azure](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-create-vm-size.png)\n\n### Settings\n\nOn the next blade, you are called to configure the Storage, Network and\nAvailability settings. Just review them and take the defaults, which are sufficient\nfor test-driving GitLab. Hit OK when done.\n\n![Configure various settings](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-create-vm-settings.png)\n\n### Summary\n\nOn the summary page you will have the chance to review your choices so far. 
If\nyou change your mind about something, you can go back to the previous steps and\namend your choice. Hit OK when ready.\n\n![Azure create VM - Summary](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-create-vm-summary.png)\n\n### Buy\n\nThis is the last step, and you are presented with the price per hour your new VM\nwill cost. You can see on this page that we are billed only for the VM; GitLab\nCE is a separate tile which is free to use. Go on and click **Purchase** for\nthe deployment to begin.\n\n![Azure create VM - Buy](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-create-vm-buy.png)\n\n### Deployment page\n\nAt this point, Azure takes over and begins deploying your GitLab Ubuntu VM. You\ncan scroll down to see the deployment process, which takes a few minutes.\n\n![Azure deploying GitLab](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-deploy-vm.png)\n\nWhen the GitLab environment is ready, you will see the management blade for your\nnew VM. This is basically your VM dashboard, where you can configure many things,\nlike the DNS name of your instance.\n\n![GitLab VM settings](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-deploy-gitlab-settings.png)\n\n### Set up a domain name\n\nThe public IP address that the VM uses is shown in the 'Essentials' blade. Click\non it and select **Configuration** under the 'General' tab. Enter a friendly\nDNS name for your instance in the DNS name label field.\n\n![Setting up a DNS name label for your IP](https://about.gitlab.com/images/blogimages/gitlab-azure/azure-gitlab-dns.png)\n\nIn the screenshot above I have set my DNS name to\n`gitlab-ce-test.xxx.cloudapp.azure.com`. Hit **Save** for the changes to take\neffect.\n\n\u003Ci class=\"fas fa-info-circle\" aria-hidden=\"true\" style=\"color: rgb(49, 112, 143);\">\u003C/i>\nIf you want to use your own domain name, add a DNS `A` record at your\ndomain registrar pointing to the IP address given by Azure.\n{: .alert .alert-info}\n\n## Connecting to GitLab\n\nUse the IP address or the domain name you set up in the previous step to\nvisit GitLab in your browser.\n\nThe first time you hit the URL, you will be asked to set up a new password\nfor the administrator user that GitLab has created for you.\n\n![GitLab first screen - choose password for admin user](https://about.gitlab.com/images/blogimages/gitlab-azure/gitlab-ce-first-access.png)\n\nOnce you change the password you will be redirected to the login page. Use `root` as the\nusername and the password you just configured.\n\n![GitLab first screen - login admin user](https://about.gitlab.com/images/blogimages/gitlab-azure/gitlab-ce-first-login.png)\n\nAt this point you have a working GitLab VM running on Azure. Congratulations!\n\n## Creating your first GitLab project\n\nYou can skip this section if you are familiar with Git and GitLab.\nOtherwise, let's create our first project. 
From the Welcome page click\n**New Project**.\n\n![Welcome to GitLab](https://about.gitlab.com/images/blogimages/gitlab-azure/gitlab-ce-welcome.png)\n\nI'm going to make this a private project called \"demo\":\n\n![GitLab - create new project](https://about.gitlab.com/images/blogimages/gitlab-azure/gitlab-ce-create-project.png)\n\nIt only takes a few moments to create the project, and the next screen\nwill show you the commands to begin working with your new repository\nlocally.\n\n![GitLab - project git config](https://about.gitlab.com/images/blogimages/gitlab-azure/gitlab-ce-new-project.png)\n\nFollowing these instructions, you should be able to push and pull from\nyour new GitLab repository.\n\nThat's it! You have a working GitLab environment!\n\n## Maintaining your GitLab instance\n\nIt's important to keep your GitLab environment up to date. The GitLab\nteam is constantly making enhancements to the product, and occasionally you may\nneed to upgrade for security reasons.\n\nLet's review how to upgrade GitLab. When you click on the \"Admin Area\" wrench,\nGitLab will tell you whether there are updates available. In the following\nscreenshot we are told to update ASAP, and this is because there is a\nsecurity fix.\n\n![GitLab - update asap](https://about.gitlab.com/images/blogimages/gitlab-azure/gitlab-ce-update-asap.png)\n\nIn the screenshot we can see that the Azure Marketplace currently offers GitLab CE\nversion 8.6.5, and that an update is available. To update, you need to connect\nto your Ubuntu server using [PuTTY] or an equivalent SSH tool. Remember to log\nin with the username and password you specified [when you created](#basics)\nyour Azure VM.\n\nIn your terminal, type the following to connect to your VM:\n\n```bash\nssh user@gitlab-ce-test.westeurope.cloudapp.azure.com\n```\n\nProvide your password at the prompt to authenticate.\n\n\u003Ci class=\"fas fa-info-circle\" aria-hidden=\"true\" style=\"color: rgb(49, 112, 143);\">\u003C/i>\nYour domain name will differ and is the one we [set up previously](#set-up-a-domain-name).\nYou can also use the public IP instead of the domain name.\n{: .alert .alert-info}\n\nOnce you log in, use the following command to upgrade GitLab to the latest\nversion:\n\n```bash\nsudo apt-get update && sudo apt-get install gitlab-ce\n```\n\nOnce it completes, you should have an up-to-date GitLab instance!\n\n![GitLab up to date](https://about.gitlab.com/images/blogimages/gitlab-azure/gitlab-ce-up-to-date.png)\n\n## Conclusion\n\nGitLab is a great Git repo tool, plus a whole lot more. In this post we\nlooked at how to run GitLab using the Azure Marketplace offering. Azure\nis a great way to experiment with GitLab. If you decide, like me, that\nGitLab is the best [solution for source code management](/solutions/source-code-management/), you can continue\nto use Azure as your secure, scalable cloud provider.\n\n## About guest author\n\nThis is a guest blog post by Dave Wentzel, a Data Solution Architect\nwith Microsoft. 
\n\n\u003C!-- Identifiers -->\n\n[ce]: /downloads/\n[ee]: /pricing/\n[free-trial]: https://azure.microsoft.com/en-us/free/\n[msdn-benefits]: https://azure.microsoft.com/en-us/pricing/member-offers/msdn-benefits-details/?WT.mc_id=A261C142F\n[marketplace]: https://azure.microsoft.com/en-us/marketplace/\n[Microsoft Azure]: https://azure.microsoft.com/en-us/\n[ms-open]: https://stackoverflow.com/questions/33653726/azure-file-share-backup-database-to-mounted-drive\n[portal.azure.com]: https://portal.azure.com\n[putty]: http://www.putty.org/\n",{"slug":8873,"featured":6,"template":678},"how-to-setup-a-gitlab-instance-on-microsoft-azure","content:en-us:blog:how-to-setup-a-gitlab-instance-on-microsoft-azure.yml","How To Setup A Gitlab Instance On Microsoft Azure","en-us/blog/how-to-setup-a-gitlab-instance-on-microsoft-azure.yml","en-us/blog/how-to-setup-a-gitlab-instance-on-microsoft-azure",{"_path":8879,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8880,"content":8885,"config":8889,"_id":8891,"_type":16,"title":8892,"_source":17,"_file":8893,"_stem":8894,"_extension":20},"/en-us/blog/get-started-with-openshift-origin-3-and-gitlab",{"title":8881,"description":8882,"ogTitle":8881,"ogDescription":8882,"noIndex":6,"ogImage":4861,"ogUrl":8883,"ogSiteName":692,"ogType":693,"canonicalUrls":8883,"schema":8884},"Get started with OpenShift Origin 3 and GitLab","In this tutorial, we will see how to deploy GitLab in OpenShift using GitLab's official Docker image","https://about.gitlab.com/blog/get-started-with-openshift-origin-3-and-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get started with OpenShift Origin 3 and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Achilleas Pipinellis\"}],\n        \"datePublished\": \"2016-06-28\",\n      }",{"title":8881,"description":8882,"authors":8886,"heroImage":4861,"date":8887,"body":8888,"category":14},[8273],"2016-06-28","\n> Note: This article is deprecated. It is now recommended to use the official\n> Kubernetes Helm charts for installing GitLab to OpenShift. Check out the\n> [official installation docs](https://gitlab.com/charts/gitlab/blob/master/doc/cloud/openshift.md)\n> for details.\n\n> Note: This article has been moved to a [technical article](https://docs.gitlab.com/ee/install/openshift_and_gitlab/index.html) so it can be more easily updated. We recommend using [the article](https://docs.gitlab.com/ee/install/openshift_and_gitlab/index.html).\n\n[OpenShift Origin][openshift] is an open source container application\nplatform created by [RedHat], based on [kubernetes] and [Docker]. That means\nyou can host your own PaaS for free and with almost no hassle.\n\nIn this tutorial, we will see how to deploy GitLab in OpenShift using GitLab's\nofficial Docker image, while getting familiar with the web interface and CLI\ntools that will help us achieve our goal.\n\n## Prerequisites\n\nOpenShift 3 is not yet offered on RedHat's hosted Online platform ([openshift.com]),\nso in order to test it, we will use an [all-in-one Virtualbox image][vm] that is\noffered by the OpenShift developers and managed by Vagrant. 
\n\nIt is also important to mention that for the purposes of this tutorial, the\nlatest Origin release is used:\n\n- **oc** `v1.3.0` (must be [installed][oc-gh] locally on your computer)\n- **openshift** `v1.3.0` (is pre-installed in the [VM image][vm-new])\n- **kubernetes** `v1.3.0` (is pre-installed in the [VM image][vm-new])\n\nIf you intend to deploy GitLab on a production OpenShift cluster, there are some\nlimitations to bear in mind. Read the [limitations](#current-limitations)\nsection for more information and follow the links to the relevant\ndiscussions.\n\nNow that you have all the pieces in place, let's see how easy it is to test OpenShift\non your computer.\n\n## Getting familiar with OpenShift Origin\n\nThe environment we are about to use is based on CentOS 7, which comes with all\nthe tools needed pre-installed: Docker, kubernetes, OpenShift, etcd.\n\n### Test OpenShift using Vagrant\n\nAs of this writing, the all-in-one VM is at version 1.3, and that's\nwhat we will use in this tutorial.\n\nIn short:\n\n1. Open a terminal and in a new directory run:\n   ```sh\n   vagrant init openshift/origin-all-in-one\n   ```\n1. This will generate a Vagrantfile based on the all-in-one VM image\n1. In the same directory where you generated the Vagrantfile,\n   enter:\n\n   ```sh\n   vagrant up\n   ```\n\nThis will download the VirtualBox image and fire up the VM with some preconfigured\nvalues, as you can see in the Vagrantfile. As you may have noticed, you need\nplenty of RAM (5GB in our example), so make sure you have enough.\n\nNow that OpenShift is set up, let's see what the web console looks like.\n\n### Explore the OpenShift web console\n\nOnce Vagrant finishes provisioning the VM, you will be presented with a\nmessage containing some important information, including the IP address\nof the deployed OpenShift platform and, in particular, the web console address \u003Chttps://10.2.2.2:8443/console/>.\nOpen this link with your browser and accept the self-signed certificate in\norder to proceed.\n\nLet's log in as admin with username/password `admin/admin`. This is what the\nlanding page looks like:\n\n![openshift web console](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/web-console.png)\n\nYou can see that a number of [projects] are already created for testing purposes.\n\nIf you head over to the `openshift-infra` project, a number of services with their\nrespective pods are there to explore.\n\n![openshift web console](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/openshift-infra-project.png)\n\nWe are not going to explore the whole interface, but if you want to learn about\nthe key concepts of OpenShift, read the [core concepts reference][core] in the\nofficial documentation.\n\n### Explore the OpenShift CLI\n\nOpenShift Client (`oc`) is a powerful CLI tool that talks to the OpenShift API\nand performs pretty much everything you can do from the web UI and much more.\n\nAssuming you have [installed][oc] it, let's explore some of its main\nfunctionalities.\n\nLet's first see the version of `oc`:\n\n```sh\n$ oc version\n\noc v1.3.0\nkubernetes v1.3.0+52492b4\n```\n\nWith `oc help` you can see the top-level commands you can run with `oc` to\ninteract with your cluster and kubernetes, run applications, create projects and\nmuch more.\n\nLet's log in to the all-in-one VM and see how to achieve the same results as\nwhen we visited the web console earlier. The username/password for the\nadministrator user is `admin/admin`. There is also a test user with\nusername/password `user/user`, with limited access. Let's log in as admin for the moment:\n\n```sh\n$ oc login https://10.2.2.2:8443\n\nAuthentication required for https://10.2.2.2:8443 (openshift)\nUsername: admin\nPassword:\nLogin successful.\n\nYou have access to the following projects and can switch between them with 'oc project \u003Cprojectname>':\n\n  * cockpit\n  * default (current)\n  * delete\n  * openshift\n  * openshift-infra\n  * sample\n\nUsing project \"default\".\n```\n\nSwitch to the `openshift-infra` project with:\n\n```sh\noc project openshift-infra\n```\n\nAnd finally, see its status:\n\n```sh\noc status\n```\n\nThe last command should print a bunch of information about the statuses of the\npods and the services, which, if you look closely, is what we encountered in the\nsecond image when we explored the web console.\n\nYou can always read more about `oc` in the [OpenShift CLI documentation][oc].\n\n### Troubleshooting the all-in-one VM\n\nUsing the all-in-one VM gives you the ability to test OpenShift whenever you\nwant. That means you get to play with it, shut down the VM, and pick up where\nyou left off.\n\nSometimes though, you may encounter some issues, like OpenShift not running\nwhen booting up the VM. The web UI may not be responding, or you may see issues\nwhen trying to log in with `oc`, like:\n\n```\nThe connection to the server 10.2.2.2:8443 was refused - did you specify the right host or port?\n```\n\nIn that case, the OpenShift service might not be running. In order to fix it:\n\n1. SSH into the VM by going to the directory where the Vagrantfile is and then\n   run:\n\n   ```sh\n   vagrant ssh\n   ```\n\n1. Run `systemctl` and verify from the output that the `openshift` service is not\n   running (it will be shown in red). If that's the case, start the service with:\n\n   ```sh\n   sudo systemctl start openshift\n   ```\n\n1. Verify the service is up with:\n\n   ```sh\n   systemctl status openshift -l\n   ```\n\nNow you will be able to log in using `oc` (like we did before) and visit the web\nconsole.
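\n\nIf you'd rather not open an interactive SSH session, the same fix can be applied in one line from the directory containing the Vagrantfile (just a convenience sketch of the steps above):\n\n```sh\nvagrant ssh -c \"sudo systemctl restart openshift && systemctl status openshift --no-pager\"\n```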
\n\n## Deploy GitLab\n\nNow that you've got a taste of what OpenShift looks like, let's deploy GitLab!\n\n### Create a new project\n\nFirst, we will create a new project to host our application. You can do this\neither by running the CLI client:\n\n```bash\n$ oc new-project gitlab\n```\n\nor by using the web interface:\n\n![Create a new project from the UI](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/create-project-ui.png)\n\nIf you used the command line, `oc` automatically uses the new project and you\ncan see its status with:\n\n```sh\n$ oc status\n\nIn project gitlab on server https://10.2.2.2:8443\n\nYou have no services, deployment configs, or build configs.\nRun 'oc new-app' to create an application.\n```\n\nIf you visit the web console, you can now see `gitlab` listed in the projects list.\n\nThe next step is to import the OpenShift template for GitLab.\n\n### Import the template\n\nThe [template][templates] is basically a JSON file which describes a set of\nrelated object definitions to be created together, as well as a set of\nparameters for those objects.\n\nThe template for GitLab resides in the Omnibus GitLab repository under the\ndocker directory. Let's download it locally with `wget`:\n\n```bash\nwget https://gitlab.com/gitlab-org/omnibus-gitlab/raw/master/docker/openshift-template.json\n```\n\nAnd then let's import it in OpenShift:\n\n```bash\noc create -f openshift-template.json -n openshift\n```\n\n**Note**\n\nThe `-n openshift` namespace flag is a trick to make the template available to all\nprojects. If you recall from when we created the `gitlab` project, `oc` switched\nto it automatically, and that can be verified by the `oc status` command. If\nyou omit the namespace flag, the application will be available only to the\ncurrent project, in our case `gitlab`. The `openshift` namespace is a global\none that the administrators should use if they want the application to be\navailable to all users.\n\nWe are now ready to finally deploy GitLab!\n\n### Create a new application\n\nThe next step is to use the template we previously imported. Head over to the\n`gitlab` project and hit the **Add to Project** button.\n\n![Add to project](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/add-to-project.png)\n\nThis will bring you to the catalog where you can find all the pre-defined\napplications ready to deploy with the click of a button. Search for `gitlab`\nand you will see the previously imported template:\n\n![Add GitLab to project](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/add-gitlab-to-project.png)\n\nSelect it, and in the following screen you will be presented with the predefined\nvalues used with the GitLab template:\n\n![GitLab settings](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/gitlab-settings.png)\n\nNotice at the top that there are three resources to be created with this\ntemplate:\n\n- `gitlab-ce`\n- `gitlab-ce-redis`\n- `gitlab-ce-postgresql`\n\nWhile PostgreSQL and Redis are bundled in Omnibus GitLab, the template is using\nseparate images, as you can see from [this line][line] in the template.\n\nThe predefined values have been calculated for the purposes of testing out\nGitLab in the all-in-one VM. You don't need to change anything here; hit\n**Create** to start the deployment.
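\n\nIf you prefer to stay in the terminal, `oc new-app` can instantiate the same template. This is a rough CLI equivalent of clicking **Create**, assuming the imported template is named `gitlab-ce` and you are still in the `gitlab` project:\n\n```sh\n# instantiate the GitLab template with its default parameter values\noc new-app gitlab-ce\n```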
\n\nIf you are deploying to production, you will want to change the **GitLab instance\nhostname** and use greater values for the volume sizes. If you don't provide a\npassword for PostgreSQL, it will be created automatically.\n\n**Note**\n\nThe `gitlab.apps.10.2.2.2.xip.io` hostname that is used by default will\nresolve to the host with IP `10.2.2.2`, which is the IP our VM uses. It is a\ntrick to have distinct FQDNs pointing to services that are on our local network.\nRead more on how this works at \u003Chttp://xip.io>.\n\nNow that we have configured this, let's see how to manage and scale GitLab.\n\n## Manage and scale GitLab\n\nSetting up GitLab for the first time might take a while, depending on your\ninternet connection and the resources you have attached to the all-in-one VM.\nGitLab's docker image is quite big (~500MB), so you'll have to wait until\nit's downloaded and configured before you use it.\n\n### Watch while GitLab gets deployed\n\nNavigate to the `gitlab` project at **Overview**. You can tell that the\ndeployment is in progress by the orange color. The Docker images are being\ndownloaded and soon they will be up and running.\n\n![GitLab overview](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/gitlab-overview.png)\n\nSwitch to **Browse > Pods** and you will eventually see all 3 pods in a\nrunning status. Remember the 3 resources that were to be created when we first\ncreated the GitLab app? This is where you can see them in action.\n\n![Running pods](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/running-pods.png)\n\nYou can see GitLab being reconfigured by taking a look at the logs in real time.\nClick on `gitlab-ce-2-j7ioe` (your ID will be different) and go to the **Logs**\ntab.\n\n![GitLab logs](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/gitlab-logs.png)\n\nAt some point you should see a _**gitlab Reconfigured!**_ message in the logs.\nNavigate back to the **Overview** and hopefully all pods will be up and running.\n\n![GitLab running](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/gitlab-running.png)\n\nCongratulations! You can now navigate to your new shiny GitLab instance by\nvisiting \u003Chttp://gitlab.apps.10.2.2.2.xip.io>, where you will be asked to\nchange the root user password. Log in using `root` as the username and providing the\npassword you just set, and start using GitLab!\n\n### Scale GitLab with the push of a button\n\nIf you reach a point where your GitLab instance could benefit from a boost\nof resources, you'll be happy to know that you can scale up with the push of a\nbutton.\n\nIn the **Overview** page, just click the up arrow button in the pod where\nGitLab is. The change is instant and you can see the number of [replicas] now\nrunning scaled to 2.\n\n![GitLab scale](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/gitlab-scale.png)\n\nScaling up the GitLab pods is actually like adding new application servers to your\ncluster. You can see how that would work if you didn't use GitLab with\nOpenShift by following the [HA documentation][ha] for the application servers.\n\nBear in mind that you may need more resources (CPU, RAM, disk space) when you\nscale up. If a pod is in a pending state for too long, you can navigate to\n**Browse > Events** and see the reason and message of the state.\n\n![No resources](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/no-resources.png)
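\n\nThe same events are also available from the CLI. A quick way to check, assuming you are still in the `gitlab` project:\n\n```sh\n# list recent events (the CLI equivalent of Browse > Events)\noc get events -n gitlab\n```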
\n\n### Scale GitLab using the `oc` CLI\n\nUsing `oc`, it is super easy to scale up the replicas of a pod. You may want to\nskim through the [basic CLI operations][basic-cli] to get a taste of how the CLI\ncommands are used. Pay extra attention to the object types, as we will use some\nof them and their abbreviated versions below.\n\nIn order to scale up, we need to find out the name of the replication controller.\nLet's see how to do that using the following steps.\n\n1. Make sure you are in the `gitlab` project:\n\n   ```sh\n   oc project gitlab\n   ```\n\n1. See what services are used for this project:\n\n   ```sh\n   oc get svc\n   ```\n\n   The output will be similar to:\n\n   ```\n   NAME                   CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE\n   gitlab-ce              172.30.243.177   \u003Cnone>        22/TCP,80/TCP   5d\n   gitlab-ce-postgresql   172.30.116.75    \u003Cnone>        5432/TCP        5d\n   gitlab-ce-redis        172.30.105.88    \u003Cnone>        6379/TCP        5d\n   ```\n\n1. We need to see the replication controllers of the `gitlab-ce` service.\n   Get a detailed view of the current ones:\n\n   ```sh\n   oc describe rc gitlab-ce\n   ```\n\n   This will return a large detailed list of the current replication controllers.\n   Search for the name of the GitLab controller, usually `gitlab-ce-1`, or, if\n   that failed at some point and you spawned another one, it will be named\n   `gitlab-ce-2`.\n\n1. Scale GitLab using the previous information:\n\n   ```sh\n   oc scale --replicas=2 replicationcontrollers gitlab-ce-2\n   ```\n\n1. Get the new replicas number to make sure scaling worked:\n\n   ```sh\n   oc get rc gitlab-ce-2\n   ```\n\n   which will return something like:\n\n   ```\n   NAME          DESIRED   CURRENT   AGE\n   gitlab-ce-2   2         2         5d\n   ```\n\nAnd that's it! We successfully scaled the replicas to 2 using the CLI.\n\nAs always, you can find the name of the controller using the web console. Just\nclick on the service you are interested in and you will see the details in the\nright sidebar.\n\n![Replication controller name](https://about.gitlab.com/images/blogimages/get-started-with-openshift-origin-3-and-gitlab/rc-name.png)\n\n### Autoscaling GitLab\n\nIn case you were wondering whether there is an option to autoscale a pod based\non the resources of your server, the answer is yes, of course there is.\n\nWe will not expand on this matter, but feel free to read the documentation on\nOpenShift's website about [autoscaling].\n\n## Current limitations\n\nAs stated in the [all-in-one VM][vm] page:\n\n> By default, OpenShift will not allow a container to run as root or even a\nnon-random container assigned userid. Most Docker images in the Dockerhub do not\nfollow this best practice and instead run as root.\n\nThe all-in-one VM we are using has this security turned off, so it will not\nbother us. In any case, it is something to keep in mind when deploying GitLab\non a production cluster.\n\nIn order to deploy GitLab on a production cluster, you will need to assign the\nGitLab service account to the `anyuid` Security Context Constraint:\n\n1. Edit the Security Context:\n   ```sh\n   oc edit scc anyuid\n   ```\n\n1. Add `system:serviceaccount:\u003Cproject>:gitlab-ce-user` to the `users` section.\n   If you changed the Application Name from the default, the user will\n   be `\u003Capp-name>-user` instead of `gitlab-ce-user`\n\n1. Save and exit the editor
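\n\nFor example, with the default application name and a project called `gitlab`, the relevant part of the edited file would end up looking roughly like this (a sketch; the SCC will list other system users too):\n\n```\nusers:\n- system:serviceaccount:gitlab:gitlab-ce-user\n```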
\n\n## Conclusion\n\nBy now, you should have an understanding of the basic OpenShift Origin concepts\nand a sense of how things work using the web console or the CLI.\n\nGitLab was hard to install in previous versions of OpenShift,\nbut now that belongs to the past. Upload a template, create a project, add an\napplication and you are done. You are ready to log in to your new GitLab instance.\n\nAnd remember that in this tutorial we just scratched the surface of what Origin\nis capable of. As always, you can refer to the detailed\n[documentation][openshift-docs] to learn more about deploying your own OpenShift\nPaaS and managing your applications with the ease of containers.\n\n[RedHat]: https://www.redhat.com/en \"RedHat website\"\n[openshift]: https://www.openshift.org \"OpenShift Origin website\"\n[vm]: https://www.openshift.org/vm/ \"OpenShift All-in-one VM\"\n[vm-new]: https://atlas.hashicorp.com/openshift/boxes/origin-all-in-one \"Official OpenShift Vagrant box on Atlas\"\n[template]: https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/docker/openshift-template.json \"OpenShift template for GitLab\"\n[openshift.com]: https://openshift.com \"OpenShift Online\"\n[kubernetes]: http://kubernetes.io/ \"Kubernetes website\"\n[Docker]: https://www.docker.com \"Docker website\"\n[oc]: https://docs.openshift.org/latest/cli_reference/get_started_cli.html \"Documentation - oc CLI documentation\"\n[VirtualBox]: https://www.virtualbox.org/wiki/Downloads \"VirtualBox downloads\"\n[Vagrant]: https://www.vagrantup.com/downloads.html \"Vagrant downloads\"\n[projects]: https://docs.openshift.org/latest/dev_guide/projects.html \"Documentation - Projects overview\"\n[core]: https://docs.openshift.org/latest/architecture/core_concepts/index.html \"Documentation - Core concepts of OpenShift Origin\"\n[templates]: https://docs.openshift.org/latest/architecture/core_concepts/templates.html \"Documentation - OpenShift templates\"\n[old-post]: https://blog.openshift.com/deploy-gitlab-openshift/ \"Old post - Deploy GitLab on OpenShift\"\n[line]: https://gitlab.com/gitlab-org/omnibus-gitlab/blob/658c065c8d022ce858dd63eaeeadb0b2ddc8deea/docker/openshift-template.json#L239 \"GitLab - OpenShift template\"\n[oc-gh]: https://github.com/openshift/origin/releases/tag/v1.3.0 \"Openshift 1.3.0 release on GitHub\"\n[ha]: https://docs.gitlab.com/ee/administration/reference_architectures/index.html \"Documentation - GitLab High Availability\"\n[replicas]: https://docs.openshift.org/latest/architecture/core_concepts/deployments.html#replication-controllers \"Documentation - Replication controller\"\n[autoscaling]: https://docs.openshift.org/latest/dev_guide/pod_autoscaling.html \"Documentation - Autoscale\"\n[basic-cli]: https://docs.openshift.org/latest/cli_reference/basic_cli_operations.html \"Documentation - Basic CLI operations\"\n[openshift-docs]: https://docs.openshift.org \"OpenShift documentation\"\n",{"slug":8890,"featured":6,"template":678},"get-started-with-openshift-origin-3-and-gitlab","content:en-us:blog:get-started-with-openshift-origin-3-and-gitlab.yml","Get Started With Openshift Origin 3 And 
Gitlab","en-us/blog/get-started-with-openshift-origin-3-and-gitlab.yml","en-us/blog/get-started-with-openshift-origin-3-and-gitlab",{"_path":8896,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8897,"content":8903,"config":8907,"_id":8909,"_type":16,"title":8910,"_source":17,"_file":8911,"_stem":8912,"_extension":20},"/en-us/blog/secure-gitlab-pages-with-startssl",{"title":8898,"description":8899,"ogTitle":8898,"ogDescription":8899,"noIndex":6,"ogImage":8900,"ogUrl":8901,"ogSiteName":692,"ogType":693,"canonicalUrls":8901,"schema":8902},"Secure GitLab Pages with StartSSL","A quick overview on SSL/TLS certificates and StartCom CA and a comparison between StartSSL Class 1 and Let's Encrypt.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684890/Blog/Hero%20Images/startssl-gitlab-pages-cover.jpg","https://about.gitlab.com/blog/secure-gitlab-pages-with-startssl","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Secure GitLab Pages with StartSSL\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2016-06-24\",\n      }",{"title":8898,"description":8899,"authors":8904,"heroImage":8900,"date":8905,"body":8906,"category":14},[8399],"2016-06-24","**StartCom** certificates have recently been **distrusted** by [Mozilla Firefox](https://blog.mozilla.org/security/2016/10/24/distrusting-new-wosign-and-startcom-certificates/) and [Google Chrome](https://security.googleblog.com/2016/10/distrusting-wosign-and-startcom.html). Certs issued prior to October 21st, 2016 don't seem to have been affected and are therefore still trusted.\u003Cbr>\nIn response to my contact, StartCom affirmed they're working hard to revert this situation and hope to have a resolution by the end of January, 2017.\u003Cbr>\n\u003Cspan class=\"note\">Update by Marcia Ramos, on 2016/12/20.\u003C/span>\n\nWith [GitLab Pages][pages] you can host your static website under your custom domain.\nWith a [StartSSL] digital certificate you can secure it. And that's all for **free**!\n\nIn this post, first we'll give you a quick overview on SSL/TLS certificates and StartCom CA, then\nwe will show you a comparison between [StartSSL Class 1][startssl-class-1] and [Let's Encrypt][lets]\nto facilitate your decision to choose one over another.\n\nFinally, we will guide you through the process of securing your GitLab Pages site\nwith **[StartSSL Class 1 free certificates][startssl-class-1]**.\n\n**Note:** We assume you are familiar with web development and web hosting.\n\n\u003C!-- more -->\n\n----------\n\n### What's in this tutorial?\n- TOC\n\n----\n\n## HTTPS: a quick overview\n\n### Why should I care about HTTPS?\n\nPerhaps this might be your first question. If our sites are hosted by [GitLab Pages][pages],\ntherefore they are [static][ssg-post], hence we are not dealing with server-side scripts\nnor credit card transactions, so why do we need secure connections? \n\nBack in the 1990s, where HTTPS came out, [SSL]\u003Csup>[1](#1)\u003C/sup> was considered a \"special\"\nsecurity measure, necessary just for big folks, like banks and shoppings sites with financial transactions.\nNow we have a different picture. [According to Josh Aas][lets-quote], [ISRG] Executive Director:\n\n> _We’ve since come to realize that HTTPS is important for almost all websites. 
It’s important for any website that allows people to log in with a password, any website that [tracks its users][lets-ref1] in any way, any website that [doesn’t want its content altered][lets-ref2], and for any site that offers content people might not want others to know they are consuming. We’ve also learned that any site not secured by HTTPS [can be used to attack other sites][lets-ref3]._\n\nHow about taking Josh's advice and protecting our sites too? We will be well supported,\nand we'll contribute to a safer internet.\n\n### Organizations supporting HTTPS\n\nThere is a huge movement in favor of securing all the web. W3C fully [supports the cause][w3c-https]\nand explains very well the reasons for that. Richard Barnes, a writer for Mozilla Security Blog,\nsuggested that [Firefox would deprecate HTTP][moz-http-deprecate], and would no longer accept\nunsecured connections. Recently, Mozilla published a [communicate][moz-comm] reiterating the importance of HTTPS.\n\n### Free SSL/TLS Certificates\n\nAs individuals, dealing with small sites for promoting ourselves and our work, we might not be\ninterested in buying a premium\u003Csup>[2](#2)\u003C/sup> [TLS]\u003Csup>[1](#1)\u003C/sup> certificate issued by\na robust [Certification Authority (CA)][wiki-ca], like [Comodo] or [Symantec]. But now we have a\nchoice! We can use free certificates, like the ones issued by [Let's Encrypt][lets] and [StartCom][startssl].\n\n----\n\n**Note 1:** [SSL] stands for **Secure Sockets Layer**, which is the predecessor of **Transport Layer Security** ([TLS]).\n\n**Note 2:** Premium certificates examples: [Comodo SSL/TLS][comodo-ssl], [StartSSL SSL Class 2][startssl-class-2],\n[Symantec SSL][symantec-ssl]. They offer support for e-commerce and grant a huge warranty to their customers.\n\n----\n\n## StartCom\n\n[Start Commercial Ltd. (StartCom)][startssl] was [founded][startssl-about] by [Eddy Nigg] in 2005,\nwho wanted to engage in a \"revolution\" of the digital certification industry, making certificates\nmore affordable and with better quality. StartCom has become the world's sixth largest Certificate\nAuthority, covering nearly one million registered subscribers, and more than 400,000 websites.\n\n[StartSSL™][startssl-class-1] is the [StartCom][startssl] service brand of its digital certificates\nissuing division, it offers [free SSL certificates][startssl-about] and free email encryption certificates\nfor worldwide subscribers. Thus, on this post, **StartCom** refers to the **Certificate Authority**,\nwhile **StartSSL** is attributed to **certificates** issued by **StartCom**.\n\nAmong four classes of StartSSL certificates, there is one **free**, called [Class 1][startssl-class-1].\nThis is the one will be referencing to in this post. But, of course, you are free to choose their premium products too.\nOn their website, you can find a [comparison chart][startssl-compare] for their certificates.\n\n[StartCom Certificate Policy & Practice Statements][startssl-policy] covers a lot of procedures to make\nsure they can trust their customers information. 
That document states rules, obligations, validations, etc.\nGeneral information can be found through their [documentation][startssl-docs].\n\n### StartSSL Class 1 Features\n\n- Certificates can be [issued in minutes][startssl-class-1]\n- There are two methods of domain verification: email, or code file validation\u003Csup>[3](#3)\u003C/sup> \n- [Certificates are valid for 1 year][startssl-one-year], with [unlimited renewal allowance][startssl-renewal]\n- The [CSR (Certificate Signing Request)][csr] can be generated from any O.S. (Linux, Mac or Windows)\n- S/MIME Client\u003Csup>[4](#4)\u003C/sup> + Authentication\n- Supported by [all browsers and servers][startssl]\n- Live chat support 24/7\n\n----\n\n**Note 3:** Code file validation is a file provided by an institution, which needs to be uploaded\nto the site root. It's a simple HTML file containing a token to verify that we hold that particular\ndomain. It's not used just by CAs. For example, it is also used by Google,\nas a [method for site ownership verification][google-verif].\n\n**Note 4:** [S/MIME][wiki-smime] client certificates are used for client authentication to\nwebsites and for the signing, encryption and decryption of personal data. Most commonly they\nare used for email signing and encryption, but also PDF and office documents. Higher validated\ncertificates can be used to sign contracts in digital format. Source:\n[StartCom UI - Certificates][startssl-certs] (you'll need to be logged into StartCom to have access to this link).\n\n----\n\n### StartSSL Class 1 Limitations\n\n**StartSSL Class 1** certificates cover a lot of attractive features, but have some [limitations][startssl-compare]:\n\n- There is no support for [wildcard DNS record][wildcard], `*.example.com` \n- We are limited to issue certificates for up to 5 domains\n- We can't use commercial \"sensitive\" names for our domains or subdomains (e.g, \"store\", \"buy\", \"shop\", etc)\n- [Revocations carry a handling fee][startssl-revocation] of currently US$ 9.90. Though we may\nuse a different subdomain in order to create additional certificates without the need to\nrevoke a previously created certificate\n\n### StartSSL **vs** Let's Encrypt\n\n[Let's Encrypt][lets] is a free, automated, and open Certificate Authority (CA), provided by\n[Internet Security Research Group (ISRG)][isrg]. They are the first CA to offer exclusively\nfree certificates. They are great, their product is awesome. However, they have some limitations\ntoo. 
To facilitate our overview and compare Let's Encrypt to StartSSL Class 1 certificates,\nlet's take a look at the table below.\n\n| Feature | StartSSL Class 1 | Let's Encrypt |\n| ---- | :---- | :---- |\n| **Cost** | Free | Free |\n| **Expiration** | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> [In 1 year][startssl-class-1-features] (365 days) | [In 3 months][lets-renewal] (90 days) |\n| **Client Install / CSR** | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> All OSs | Complicated on [Windows][lets-win] |\n| **CSR method** | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> Desktop App or command line | Command line only |\n| **Browser Support** | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> [All][startssl-about] | [Most of them][lets-browser-support] |\n| **Server Support** | [All][startssl-about] | All, with [plugins][lets-plugins] | \n| **Revocation** | [Paid][startssl-revocation] | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> [Free][lets-features] |\n| **S/MIME Client Auth** | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> [Supported][startssl-class-1-features] | [Not supported][lets-smime] |\n| **Insurance** | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> [$ 10,000 USD][startssl-class-1-features] | [None][lets-features] | \n| **Customer support** | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> [Live Chat, Phone][startssl-support], Ticket | [Forum][lets-forum] |\n| **Hash Algorithm** | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> SHA-256, SHA-384, SHA-512 | [SHA-256][lets-sha-256] |\n| **Domain validation** | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> Email or Code file Validation | [Code file validation][lets-domain-validation] |\n| **Domains limit** | [5 domains][startssl-class-1-features] | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> [Limited][lets-limits] |\n| **Subdomains** | Supported | Supported |\n| **Free domains** | Supported | Supported |\n| **Domain name** | Non-commercial names | \u003Ci class=\"fas fa-check\" style=\"color: green;\">\u003C/i> Unrestricted |\n| **Wildcard support** | No | No | \n| **Support EV certificates** | [Upgrading (paid)][start-ssl-ev] | No |\n\nThe information gathered within the table above is available across [StartCom][startssl] website,\ntheir [FAQ][startssl-faq] and their [Policy][startssl-policy]. \n\nFor additional info on Let's Encrypt, you can read their [Certificate Policy][lets-cp],\ntheir [documentation][lets-docs], and this post [Tutorial: Securing your GitLab Pages\nwith TLS and Let's Encrypt][gitlab-post-lets].\n\n## StartSSL with GitLab Pages\n\nDigital certificates are applicable to your [GitLab Pages][pages] project only when you \nse a **custom domain**, as all standard `https://namespace.gitlab.io` urls are secure by default.\n\nWith GitLab Pages we can host our static website, use custom domains (and aliases), and secure our\ndomains with SSL/TLS certificates **for free**. Cost zero, no credit card, no hidden fees! This is\ndetailed in the tutorial \"[Hosting on GitLab.com with GitLab Pages][gitlab-post-pages]\". 
Also, you can\nread the [quick start guide][pages] and the [documentation][pages-doc] for GitLab Pages.\n\nGitLab Pages supports [PEM] certificates issued by any [CA][wiki-ca], though we need to make sure\nthat the certificate is compatible with [NGINX], the server [GitLab runs on][gitlab-nginx].\n\nStartCom certificates are retrievable from their [User Interface (UI)][startssl], where you have\naccess to your own information, certificate requests, expiration dates, etc.\n\n### Before getting started\n\nFor the following steps, we assume:\n\n- You already have your custom domain\n- Your project is configured for GitLab Pages\n- Your site is accessible under `http://example.com` or `http://your.example.com`\n\nIf you don't know how to do that, please check this tutorial first: [Hosting on GitLab.com with GitLab Pages][gitlab-post-pages].\n\n## Step-by-step\n\nThe scheme below illustrates the steps we'll take to secure our GitLab Pages sites with StartSSL Class 1 certificates:\n\n![StartSSL with GitLab Pages - step-by-step scheme](https://about.gitlab.com/images/blogimages/startssl-gitlab-pages/startssl-gitlab-pages-scheme.png)\n\n### Step 1: Getting started with StartCom\n\n#### Sign up\n\nStartCom login is certificate-based, which is considered a [high-security user authentication][pki-google-book] method.\nUnlike other mechanisms for web-based authentication, certificates ensure the person or machine is who they claim to be.\n[Certificate-based authentication][startssl-user-auth] also provides data encryption capabilities between the client and the server.\n\n- Navigate to StartCom and [sign up][startssl-sign-up]\n- Enter your country and email address in the respective fields\n- Verify your e-mail (follow their instructions, which are very simple)\n- The system will issue a certificate for your email and save it locally, which will be your\nuser authentication to log in. Choose \"The system generate private key and CSR\", enter your\npassword, and download the file\n- [Import the certificate to your browser][import-cert-browser]. This will allow you to log\ninto your account in one click from then on\n\nOn the next screen, StartCom will present all their certificate options. Choose the box \"Free - Class 1 SSL\".\n\n#### Domain Validation\n\nDomain validation is necessary to make sure that the domain you are issuing the certificate for is really yours.\n\n- Open the tab **Validation Wizard** -> **Domain Validation** -> add your domain to the field -> **Continue**.\n- Choose the method for domain validation (**a** or **b**):\n   - (a) **e-mail**: take a look at the default emails presented in the box and see if you\n   have access to one of them. If you don't, jump to \"method **b**\". Otherwise, first open\n   your email account and keep it open. Then, choose the radio button corresponding\n   to your email and click on **Send verification code**. The code will be sent to your email,\n   and it will be valid for only **60 seconds**. Switch to your email, copy the code, switch\n   back to StartCom, and paste it into the field **Verification code** -> **Validation**. Done!\n   - (b) **code file**: choose **website control validation** -> download the code file and\n   leave StartCom open. Push the code file to your site root in your GitLab Pages project\n   (see the sketch after this list). Wait until the build succeeds. Go back to StartCom and\n   click on **the verification file is ready on website, Continue**. Done!\n- Once your domain is verified, click on the link **To \"Order SSL Certificate\"**
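\n\nFor method (b), pushing the code file is an ordinary commit. A minimal sketch, assuming your site is served from the repository root and the downloaded file is called `example-code-file.html` (your file name will differ):\n\n```sh\ngit add example-code-file.html\ngit commit -m \"Add StartCom domain verification file\"\ngit push origin master\n```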
\n\n#### Certificate Request\n\n- In the box **Please enter the full hostname for SSL certificate**, enter your domain name. There are a few options:\n   - you can enter only the root domain\n   - enter the root domain and some subdomains\n   - enter multiple domains (up to 5)\n\n   Suggestion? Issue a different certificate per domain or subdomain; if something goes wrong, it will be just one to fix.\n- Choose the first option: the radio button for PEM certificates **Generate by myself**.\nIt will open a text area where you'll add the CSR we'll generate next. Leave the tab open\u003Ca name=\"tab-step-1\">\u003C/a>.\n\n![StartCom - Certificate Wizard - CSR](https://about.gitlab.com/images/blogimages/startssl-gitlab-pages/startssl-gitlab-pages-gen-csr.png)\n\n### Step 2: Generate the CSR\n\nFor this particular step, we have two different approaches: [Linux and Mac](#unix) (Unix), or [Windows](#win).\n\n#### On Linux and Mac\n\n- Open the terminal and check if you have [OpenSSL] installed: `$ openssl version`.\nIf the response is not `OpenSSL x.x.x date`, install it before continuing\n- Run the command recommended by StartCom:\n\n      openssl req -newkey rsa:4096 -keyout yourkeyname.key -out yourkeyname.csr\n\n  Use `sudo` if needed. Alternatively, you can keep the key length at `rsa:2048`.\n  The file name (`yourkeyname`) can be chosen at will\n- Enter the PEM passphrase (it's like a password)\n- Verify it by typing the same passphrase again. Memorize it or make a note.\n- The terminal will ask you questions. Answer the first, then you can leave the others\nblank if you want (hit Enter/Return). Done!\n\nThe files you'll need next will be in your home directory (`~`). Open both `yourkeyname.key`\nand `yourkeyname.csr` in a text editor. The first is your encrypted private key. The latter is the CSR. The `.key` file will\nbe necessary for [Step 5](#step-5-decrypt-your-private-key) ahead.\n\n#### On Windows\n\n**Note:** the numbers in the parentheses correspond to the numbers on the image below.\n\n- Download the binary file **StartComTool.exe** and launch it\n- Go to the tab **CSR** (1)\n- On the top right, click the small link **Professional mode** (2)\n- Choose the radio button **SSL Certificate** (3)\n- Enter the **Domain name** in its field and fill in the country information (4)\n- Choose the radio button **Generate private key** (5)\n- Enter a password (passphrase) and repeat it below (6). Memorize it or make a note\n- Choose the hash algorithm and the key length (7)\n- Click **Generate CSR** (8)\n- You will be prompted to choose a file name, and a directory to store both `yourkeyname.key` and\n`yourkeyname.csr`. The `.key` file will be necessary for [Step 5](#step-5-decrypt-your-private-key) ahead.\n\nThe CSR will be shown in the box on the right of the application. Copy all the content (use the button **Copy**) (9).\n\nThe image below illustrates what the desktop application looks like:\n\n![StartCom Tool - Windows](https://about.gitlab.com/images/blogimages/startssl-gitlab-pages/startssl-gitlab-pages-windows.png)\n\nAlternatively, you can follow [these instructions][google-gen-csr] to generate the CSR via the command line on Windows.\n\n### Step 3: Issue your certificate\n\n- Go back to [StartCom][startssl-csr-enter]\n- Copy the content of the file `yourkeyname.csr`\n- Paste it into the text area we [had left open](#tab-step-1) in Step 1\n- Click **Submit**\n- Done! Your certificate has now been issued!\n\n### Step 4: Retrieve your certificate\n\nWhen you download your certificate, you'll notice that it comes as a zipped folder with 4\noptions for you to use: Apache, Nginx, IIS, Other. The certificate you'll use for **GitLab Pages**\nis the one in the `NginxServer` directory.\n\n- On your StartCom UI, go to **Tool Box** -> **Certificate List** -> **Retrieve**\n- Download and unzip the file\n- Unzip the folder named `NginxServer`; your certificate will be there. It has a `.cf_bundle` extension\n- Open the certificate with a text editor and leave it open. We'll need it for\n[Step 6](#step-6-add-domain-and-certificate-to-your-project) ahead\n\nThe certificate looks like the code below.\n\n```\n-----BEGIN CERTIFICATE-----\nMQswCKIhggfrOJmJJTDEWjkfhMNU3RhcndfjdfnuNJFHUnjfhjEGJSNSInjsnxLg\n... \nnEFH63o+ycNl2jR29jd8c8c+MBIWrYGH8TPy0GCIguwTEzY=\n-----END CERTIFICATE-----\n\n-----BEGIN CERTIFICATE-----\nMQswjsdbsfbdsfnKJHUNAIHoihushdiKHJUhcnRDsjdhJBSD786523987JHSDxLg\n... \n3KSjh872631903rjfefy47fh49fjjqjdjqd9e8fuufe8MbLm==\n-----END CERTIFICATE-----\n```
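\n\nIf you're curious about what you received, OpenSSL can decode the first certificate in the bundle. A quick optional check, assuming the file is named `example-com.cf_bundle` (yours will differ):\n\n```sh\nopenssl x509 -in example-com.cf_bundle -noout -subject -issuer -dates\n```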
\n\n### Step 5: Decrypt your private key\n\nThe key we generated in [Step 2](#step-2-generate-the-csr), `yourkeyname.key`, is our **encrypted** private key.\nThis key should **not** be used nor exposed. It's necessary to **decrypt** it before using it. To make it easier to\nrecognize these keys, observe that the code for an **encrypted** private key will always begin with\n`BEGIN ENCRYPTED PRIVATE KEY`, while a **decrypted** private key will always begin with `BEGIN RSA PRIVATE KEY`.\n\nThere are two ways to proceed in this case: [via the command line (option A)](#cmd) or via the [StartCom UI (option B)](#ui).\nChoose whichever option you feel more comfortable with.\n\n#### Option A: Via command line\n\nIn your terminal, type\n\n```\nopenssl rsa -in yourkeyname.key -out yourkeyname-decrypted.key\n```\n\nwhere `yourkeyname` is the name of the encrypted key and `yourkeyname-decrypted.key` will be the name of the decrypted key.\nUse the same password you set up before (in Step 2) when prompted. Your new key will be in your home directory.\n\nOn Windows, proceed likewise, but `cd path/to/folder` before beginning. The decrypted key will be stored in the same\ndirectory you've just navigated to.\n\n#### Option B: Via StartCom UI\n\n- On your [StartCom UI][startssl], navigate to the **Tool Box** tab and click **Decrypt Private Key**\n- Locally, open the encrypted private key in a text editor. Copy all its content.\n- Back in the UI, paste the content into the text area and type your passphrase (used in Step 2)\n- Click **Decrypt**\n- Copy the decrypted key from the new box\n- Paste the key into a new document and save it. Leave it open; we'll need it for the next step\n- Done!\n\nThe image below illustrates the result of the steps above.\n\n![StartCom - decrypt private key](https://about.gitlab.com/images/blogimages/startssl-gitlab-pages/startssl-gitlab-pages-decrypt-key.png)
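\n\nBefore moving on, you can optionally verify that the decrypted key actually pairs with the certificate: the RSA modulus of both must be identical. A quick sketch, reusing the hypothetical file names from the previous steps:\n\n```sh\n# the two fingerprints printed below must match\nopenssl x509 -in example-com.cf_bundle -noout -modulus | openssl md5\nopenssl rsa -in yourkeyname-decrypted.key -noout -modulus | openssl md5\n```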
\n\n### Step 6: Add domain and certificate to your project\n\nFinally, let's add the key and the certificate to our project:\n\n- On your GitLab UI, open your **Project's Settings** (\u003Ci class=\"fas fa-cog\" aria-hidden=\"true\">\u003C/i>) and navigate to **Pages**\n- If your domain (the one you just issued the certificate for) is already added to that project,\n**remove** it. Don't worry, we'll add it back\n- Click **+ New Domain**\n- Add the domain to the first field\n- Copy the certificate from [Step 4](#step-4-retrieve-your-certificate), and paste it into the second field\n- Copy the **decrypted** private key from [Step 5](#step-5-decrypt-your-private-key), and paste it into the third field\n- Click **Create New Domain**\n- Done!\n\n![StartCom - add key and certificate to GitLab project](https://about.gitlab.com/images/blogimages/startssl-gitlab-pages/startssl-gitlab-pages-demo.png)\n\nIf we've set up everything correctly, this is the screen we'll see on our GitLab UI:\n\n![StartSSL + GitLab - valid certificate](https://about.gitlab.com/images/blogimages/startssl-gitlab-pages/startssl-gitlab-pages-certificate-valid.png)\n\nNow your domain will be accessed under the secure HTTPS protocol, and the browser will display the\ngreen lock we were looking forward to seeing! You can perform a test to check your certificate\nwith [Site 24x7][site-24-7], if you want.\n\nDo you want to see a working example of a GitLab Pages project secured with StartSSL Class 1?\nVisit \u003Chttps://gitlab.virtuacreative.com.br> and click on the green lock\n(\u003Ci class=\"fas fa-lock\" style=\"color:rgb(0,192,29)\" aria-hidden=\"true\">\u003C/i>) to check the certificate.\nThis URL is an alias for \u003Chttps://virtuacreative.gitlab.io>.\n\n## Getting Help\n\nIf you need some help regarding GitLab Pages on GitLab.com,\nfeel free to use one of [our channels][get-help]. You can also\nopen an issue on the [Pages][pages-issues] group.\n\n## Conclusion\n\nHopefully, now you understand why the HTTPS protocol is important even for static sites, and you know how to issue a free\ncertificate from [StartCom][startssl]. With secure URLs, we are contributing to a better and safer internet!\n\nDon't you have an account on [GitLab.com][sign-up] yet? 
Let's create one!\nRemember, we can use GitLab Pages to [build any SSG][gitlab-post-pages] for us, and host any static site for free!\n\nFollow [@GitLab][twitter] on Twitter and stay tuned for updates!\n\n\u003C!-- Green lock, public domain: https://commons.wikimedia.org/wiki/File:Move_protect.svg -->\n\n\u003C!-- Cover image, Creative Commons Zero: http://gratisography.com/ -->\n\n\u003C!-- IDENTIFIERS --> \n\n\u003C!-- Alphabetical, miscellaneous -->\n\n[comodo]: https://www.comodo.com/\n[comodo-ssl]: https://ssl.comodo.com/?key5sk1=8721b4e5835982357f2a40802cc408ec59a88e2b\n[csr]: https://en.wikipedia.org/wiki/Certificate_signing_request\n[Eddy Nigg]: https://twitter.com/eddy_nigg\n[google-verif]: https://support.google.com/webmasters/answer/35179?hl=en\n[google-gen-csr]: https://developers.google.com/web/fundamentals/security/encrypt-in-transit/generating-keys-and-csr#generate-a-csr\n[import-cert-browser]: http://help-icc.untangle.com/Content/User%20Guide/UI_Tabs/SSLCertificateGPO_v4/Appendix%20B%20Installing%20the.htm\n[isrg]: https://en.wikipedia.org/wiki/Internet_Security_Research_Group\n[lets]: https://letsencrypt.org/\n[lets-browser-support]: https://community.letsencrypt.org/t/which-browsers-and-operating-systems-support-lets-encrypt/4394\n[lets-cp]: https://letsencrypt.org/repository/\n[lets-docs]: https://letsencrypt.readthedocs.org/en/latest/intro.html\n[lets-domain-validation]: https://letsencrypt.org/how-it-works/#domain-validation\n[lest-faq]: https://community.letsencrypt.org/t/frequently-asked-questions-faq/26\n[lets-features]: http://letsencrypt.readthedocs.org/en/latest/intro.html?highlight=revoke#current-features\n[lets-forum]: https://community.letsencrypt.org/\n[lets-limits]: https://community.letsencrypt.org/t/rate-limits-for-lets-encrypt/6769\n[lets-quote]: https://letsencrypt.org/2015/10/29/phishing-and-malware.html\n[lets-plugins]: https://letsencrypt.readthedocs.org/en/latest/using.html#plugins\n[lets-ref1]: https://www.washingtonpost.com/news/the-switch/wp/2013/12/10/nsa-uses-google-cookies-to-pinpoint-targets-for-hacking/\n[lets-ref2]: http://arstechnica.com/tech-policy/2014/09/why-comcasts-javascript-ad-injections-threaten-security-net-neutrality/\n[lets-ref3]: http://krebsonsecurity.com/2015/04/dont-be-fodder-for-chinas-great-cannon/\n[lets-renewal]: http://letsencrypt.readthedocs.org/en/latest/using.html#renewal\n[lets-requirements]: http://letsencrypt.readthedocs.org/en/latest/intro.html?highlight=revoke#system-requirements\n[lets-sha-256]: https://community.letsencrypt.org/t/does-the-certificate-offer-a-sha-2-signature/8914\n[lets-smime]: https://community.letsencrypt.org/t/s-mime-certificates/153\n[lets-win]: https://cultiv.nl/blog/lets-encrypt-on-windows/\n[moz-http-deprecate]: https://blog.mozilla.org/security/2015/04/30/deprecating-non-secure-http/\n[moz-comm]: https://blog.mozilla.org/security/2016/03/29/march-2016-ca-communication/\n[nginx]: https://www.nginx.com/\n[openssl]: https://www.openssl.org/\n[pages]: https://pages.gitlab.io/\n[pages-doc]: http://doc.gitlab.com/ee/pages/README.html\n[pem]: https://support.ssl.com/Knowledgebase/Article/View/19/0/der-vs-crt-vs-cer-vs-pem-certificates-and-how-to-convert-them\n[pki-google-book]: https://books.google.com.br/books?id=oswvyhAftLsC&pg=PA69&redir_esc=y#v=onepage&q&f=false\n[sha]: https://en.wikipedia.org/wiki/Secure_Hash_Algorithm\n[site-24-7]: https://www.site24x7.com/ssl-certificate.html\n[ssg-post]: /blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static/\n[symantec]: 
https://www.symantec.com/\n[symantec-ssl]: https://www.symantec.com/ssl-certificates/\n[startssl]: https://startssl.com/\n[startssl-about]: https://startssl.com/\n[startssl-certs]: https://startssl.com/\n[startssl-class-1]: https://startssl.com/\n[startssl-class-1-features]: https://startssl.com/\n[startssl-class-2]: https://startssl.com/\n[startssl-compare]: https://startssl.com/\n[startssl-csr-enter]: https://startssl.com/\n[startssl-docs]: https://startssl.com/\n[start-ssl-ev]: https://startssl.com/\n[startssl-faq]: https://startssl.com/\n[startssl-one-year]: https://startssl.com/\n[startssl-policy]: https://startssl.com/\n[startssl-renewal]: https://twitter.com/startssl/status/213348291654594560\n[startssl-revocation]: https://startssl.com/\n[startssl-sign-up]: https://startssl.com/\n[startssl-support]: https://startssl.com/\n[startssl-user-auth]: https://www.entrust.com/wp-content/uploads/2013/05/DS_MSO_UserWebAuth_web_July2012.pdf\n[static webpage]: https://en.wikipedia.org/wiki/Static_web_page\n[ssl]: https://en.wikipedia.org/wiki/Transport_Layer_Security#SSL_1.0.2C_2.0_and_3.0\n[tls]: https://en.wikipedia.org/wiki/Transport_Layer_Security\n[w3c-https]: https://w3ctag.github.io/web-https/\n[wiki-ca]: https://en.wikipedia.org/wiki/Certificate_authority\n[wiki-smime]: https://en.wikipedia.org/wiki/S/MIME\n[wildcard]: https://en.wikipedia.org/wiki/Wildcard_DNS_record\n\n\u003C!-- GitLab -->\n\n[about-gitlab-com]: /\n[get-help]: /get-help/\n[gitlab-com]: /gitlab-com/\n[pages]: https://pages.gitlab.io\n[pages-ee]: http://doc.gitlab.com/ee/pages/README.html\n[pages-issues]: https://gitlab.com/pages/pages.gitlab.io/issues\n[gitlab-nginx]: http://doc.gitlab.com/ee/install/installation.html#nginx\n[gitlab-post-lets]: /blog/tutorial-securing-your-gitlab-pages-with-tls-and-letsencrypt/\n[gitlab-post-pages]: /blog/gitlab-pages-setup/\n[sign-up]: https://gitlab.com/users/sign_in \"Sign Up!\"\n[twitter]: https://twitter.com/gitlab\n",{"slug":8908,"featured":6,"template":678},"secure-gitlab-pages-with-startssl","content:en-us:blog:secure-gitlab-pages-with-startssl.yml","Secure Gitlab Pages With Startssl","en-us/blog/secure-gitlab-pages-with-startssl.yml","en-us/blog/secure-gitlab-pages-with-startssl",{"_path":8914,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8915,"content":8921,"config":8926,"_id":8928,"_type":16,"title":8929,"_source":17,"_file":8930,"_stem":8931,"_extension":20},"/en-us/blog/how-we-scale-gitlab-by-having-docker-built-in",{"title":8916,"description":8917,"ogTitle":8916,"ogDescription":8917,"noIndex":6,"ogImage":8918,"ogUrl":8919,"ogSiteName":692,"ogType":693,"canonicalUrls":8919,"schema":8920},"How we scale GitLab by having Docker built in","Read on for more details on how we scale GitLab by having Docker built in.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684707/Blog/Hero%20Images/scale-GitLab-Docker-built-in-cover.png","https://about.gitlab.com/blog/how-we-scale-gitlab-by-having-docker-built-in","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we scale GitLab by having Docker built in\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"DJ Mountney\"}],\n        \"datePublished\": \"2016-06-21\",\n      }",{"title":8916,"description":8917,"authors":8922,"heroImage":8918,"date":8924,"body":8925,"category":14},[8923],"DJ Mountney","2016-06-21","\n\nOur [Docker image](http://docs.gitlab.com/omnibus/docker/) is a great way to\nquickly bring up an instance of 
GitLab. You can use it to try new features, or\nmount the storage volumes and use it for all your GitLab needs.\n\nIt has been over two years since we started thinking about [Docker and GitLab together](https://gitlab.com/gitlab-org/omnibus-gitlab/issues/59).\nIn those years we have pushed over 100 CE and EE docker images to [Docker Hub](https://hub.docker.com/u/gitlab/),\nand have built new features like GitLab CI with [built-in Docker support](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html),\nhelping us (and you!) to test and build our applications more easily and quickly.\n\nRead on for more details on how we scale GitLab by having Docker built in.\n\n\u003C!-- more -->\n\n## Why Docker?\n\n[Docker](https://www.docker.com/) provides a set of tools for running processes\nin a virtualized container. This satisfies most of the same use cases as a\nvirtual machine, but re-uses the host system's kernel, making it faster to boot\nup. It uses a layered filesystem which can be re-used among several containers,\nallowing it to take up less space.\n\n[Docker Hub](https://hub.docker.com/) provides a central registry of images,\nwhich helps make our GitLab image more discoverable. Docker's popularity has\nresulted in increased development in virtual containers, and it now has a large\ncommunity around it to provide support.\n\nFor the GitLab application image, we use Docker because it is the most familiar\ncontainer provider for our users, and is well supported.\n\nFor use within GitLab CI, we chose it because it is easy to manage and its\nlightweight nature makes it easy to scale our CI tasks.\n\n## Docker built right into GitLab\n\n### A GitLab CI Docker executor\n\n[GitLab CI](/solutions/continuous-integration/) is our continuous integration feature, built right into\nGitLab. It allows us to run several tasks against our code as we develop\nand deploy our applications. These tasks are run by something called a [Runner](http://doc.gitlab.com/ce/ci/runners/README.html),\nwhich processes builds.\n\nIn 2015 the community created a Runner which supported running tasks using a\nDocker executor. This soon became our [officially supported Runner](/blog/unofficial-runner-becomes-official/).\nIt runs tasks concurrently, which was a big win.\n\nThis new Runner was built with Docker support right from the beginning because\nDocker provides an easy way for us to run each task in a fresh, clean environment,\nwithout any leftovers from previous builds. It also takes care of downloading and\ninstalling our build dependencies for us. Because of this, using the Docker\nexecutor in GitLab CI is our _recommended_ approach for running most tasks.\n\n### GitLab CI autoscaling with Docker Machine\n\nThe GitLab CI Runners also support autoscaling, which allows us to provision and\nmanage multiple remote Docker hosts. We built autoscaling with the help of [Docker Machine](https://docs.docker.com/machine/).\nDocker Machine supports a vast number of [virtualization and cloud providers](https://docs.docker.com/machine/drivers/).\n\nBecause the Runners will autoscale, our infrastructure contains only as many\nbuild instances as necessary. The autoscaling feature promotes heavy\nparallelization of our tests, so they run quickly. The machines we don't\nneed are shut down, so we only need to pay for what we are using.
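\n\nWhether a job ends up on a static Docker Runner or an autoscaled one, the project-side configuration is the same: you name an image in `.gitlab-ci.yml`, and the Runner starts a fresh container from it for every run. A minimal, hypothetical job (the image and commands are placeholders for whatever your project needs):\n\n```yaml\ntest:\n  image: ruby:2.3        # each run starts from a clean container of this image\n  script:\n    - bundle install\n    - bundle exec rspec\n```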
The machines we don't\nneed are shut down, so we only need to pay for what we are using.\n\nCheck out our [autoscale runners release blog post](/releases/2016/03/29/gitlab-runner-1-1-released/#autoscaling-increases-developer-happiness)\nfor more information on how we've found autoscaling to increase developer\nhappiness.\n\n### An integrated Docker Registry\n\n[GitLab Container Registry](https://docs.gitlab.com/ee/administration/container_registry.html)\nis a secure and private registry for Docker images. Built on [open source software](https://github.com/docker/distribution),\nGitLab Container Registry isn't just a standalone registry; it's _completely_\nintegrated with GitLab.\n\nThe registry is the place to store and tag images for later use. Developers may\nwant to maintain their own registry for private, company images, or for\nthrow-away images used only in testing. Using GitLab Container Registry means\nyou don't need to set up and administer yet another service, or use a public\nregistry.\n\nCheck out our [announcement blog post](/blog/gitlab-container-registry/)\nfor more details on how the GitLab Container Registry simplify your development\nand deployment workflows.\n\n## How we continue to scale using Docker\n\n### Scaling our Tests\n\nAll of our source branches for GitLab are [tested](https://gitlab.com/gitlab-org/gitlab-ce/pipelines)\nusing GitLab CI. We switched our builds to use the autoscaled Docker executor when\nwe released support for it in GitLab CI back in March.\n\nBefore switching to the autoscaled runners, tests were on average waiting for **10\nminutes** before an executor became available for them to run. Now the tests only\never need to wait a **few seconds** for a new Docker Machine to be brought up.\n\nIt is not just our own tests though. We have [enabled autoscaling on our Shared Runners\non GitLab.com](/blog/shared-runners/) for all\nyour projects on GitLab.com. And you run a lot of builds! On average, we have\nbeen running **94** autoscaled instances. We've seen the number currently running\njump up to a couple hundred at times. It's those peak times when you would have\nbeen waiting several minutes for their builds to start. Now it's only seconds!\n\n### Scaling our Builds\n\nThis month we have moved the building of our GitLab Omnibus Packages into Docker\nas well. Previously we were running a single dedicated VM for all 9 of the\nOperating Systems that we build GitLab packages for. Most package builds took\nabout half-an-hour, but because there was only one VM for each OS, doing a\n[security patch across 7 releases](/releases/2016/06/15/gitlab-8-dot-8-dot-5-released/)\nwould take a long time.\n\nMoving the builds to Docker and turning on auto-scaling allows us to run as many\nbuilds at a time as we need. We are not finished with the move quite yet, our\nDocker builds are currently half the speed of our previous system, taking a full\nhour per build. And flaky build failures often cause us to retry the builds at\nleast once per release. We still need to reintroduce some build caching and\nother improvements to fix these problems, but we expect to be able to quickly and\nconcurrently build our packages when it is all done. 
\n## We \u003Ci class=\"fas fa-heart\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i> Docker\n{: #we-love-docker}\n\nAs you can see, providing a GitLab Docker image was just the beginning of our\nDocker obsession. Building Docker directly into GitLab CI, and adding a deeply\nintegrated Docker Registry into GitLab, is helping us build and test GitLab\nmore quickly and more often. We hope it's helping you too!\n",{"slug":8927,"featured":6,"template":678},"how-we-scale-gitlab-by-having-docker-built-in","content:en-us:blog:how-we-scale-gitlab-by-having-docker-built-in.yml","How We Scale Gitlab By Having Docker Built In","en-us/blog/how-we-scale-gitlab-by-having-docker-built-in.yml","en-us/blog/how-we-scale-gitlab-by-having-docker-built-in",{"_path":8933,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8934,"content":8940,"config":8944,"_id":8946,"_type":16,"title":8947,"_source":17,"_file":8948,"_stem":8949,"_extension":20},"/en-us/blog/ssg-overview-gitlab-pages-part-3-examples-ci",{"title":8935,"description":8936,"ogTitle":8935,"ogDescription":8936,"noIndex":6,"ogImage":8937,"ogUrl":8938,"ogSiteName":692,"ogType":693,"canonicalUrls":8938,"schema":8939},"SSGs Part 3: Build any SSG site with GitLab Pages","Which SSGs can I use with GitLab Pages? How do I set up GitLab CI to build my SSG site? Where can I find some examples?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684858/Blog/Hero%20Images/ssg-overview-gitlab-pages-part-3-cover.jpg","https://about.gitlab.com/blog/ssg-overview-gitlab-pages-part-3-examples-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"SSGs Part 3: Build any SSG site with GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2016-06-17\",\n      }",{"title":8935,"description":8936,"authors":8941,"heroImage":8937,"date":8942,"body":8943,"category":14},[8399],"2016-06-17","\nWhich **Static Site Generators** (SSGs) can I use with **GitLab Pages**? How do I set up GitLab CI to build\nmy SSG site? Where can I find some examples?\n\nIf these questions ring a bell, this **series of posts** is for you! We prepared three articles around\nthe same theme \"**Static Site Generators (SSGs)**\".\n\nThis is **Part 3: Build any SSG site with GitLab Pages**, where we'll show you some examples of SSGs\nusing distinct [GitLab CI](/topics/ci-cd/) configurations, so you can understand them and adjust them to your needs.\n\nRead through the previous posts:\n\n- [**Part 1: Dynamic vs Static Websites**][part-1]\n- [**Part 2: Modern Static Site Generators**][part-2]\n\n**Note:** For this series, we assume you are familiar with web development, curious about Static Site Generators,\nand excited to see your site getting deployed with GitLab Pages.\n{: .note}\n\n\u003C!-- more -->\n\n----------\n\n### What's in this page?\n{:.no_toc}\n\n- TOC\n{: toc}\n\n----\n\n## Build any SSG site with GitLab Pages\n\nIn the previous articles of this series on Static Site Generators, we explained the difference between\n[static and dynamic websites][part-1], and provided a general [overview of Modern SSGs][part-2].\n
Now let's\nunderstand how we can use them with [GitLab Pages][pages].\n\nYou can use [GitLab Pages][pages] to host and build **any** [SSG][ssgs] available!\nYou can also use custom domains, SSL/TLS certificates, create as many sites as you want,\nand deploy your site from **private repositories**. And that's all **for free** on GitLab.com!\nIf you are not familiar with GitLab Pages, you might want to read the article \"[Hosting on GitLab.com with GitLab Pages][post-pages]\",\nwhere you will find this information and a detailed step-by-step guide to walk you through the process.\nSee also the [quick start guide][pages] and the [official documentation][pages-ee] for further information.\n\n## SSGs examples\n\nIn the following tables you can explore some examples of SSG sites **built with\nGitLab Pages** and hosted on GitLab.com. Some of them come from community contributions.\nWe'll be very happy to have [your contribution] too!\n\nThe key to [building your site with GitLab Pages][ci-for-pages] is the GitLab CI configuration\nfile, called `.gitlab-ci.yml`.\n\nTo make GitLab CI work for this specific purpose, you need to create a job called `pages` and generate your\ncompiled site into a `public` folder (see the minimal sketch after the tables below). Everything else is adjustable to your needs.\n\nIn the tables below, there are some examples we've gathered for you, organized by their respective environments.\n\n### Environment: [Ruby]\n\n| SSG | Website URL | Project URL | Configuration |\n| --- | ----------- | ----------- | -------------- |\n| [Jekyll] | [Default Theme][j-2-web] | [Source on GitLab][j-2-pro] | [Building Jekyll 3.1.2 with Bundler][j-2-ci] |\n| [Middleman] | [Default Theme][middle-prev] | [Source on GitLab][middle-proj] | [Default + Bundler `ENV=PRODUCTION`][middle-ci] |\n| [Nanoc] | [Default Theme][nanoc-prev] | [Source on GitLab][nanoc-proj] | [Default][nanoc-ci] |\n| [Octopress] | [Default Theme][octo-prev] | [Source on GitLab][octo-proj] | [Default][octo-ci] |\n\n### Environment: [Node JS][node]\n\n| SSG | Website URL | Project URL | Configuration |\n| --- | ----------- | ----------- | -------------- |\n| [Hexo] | [Hueman Theme][hexo-prev] | [Source on GitLab][hexo-proj] | [Default + `test` job][hexo-ci] |\n| [Brunch] | [Default Skeleton][brunch-prev] | [Source on GitLab][brunch-proj] | [Default][brunch-ci] |\n| [Harp] | [Default Theme][harp-prev] | [Source on GitLab][harp-proj] | [Default][harp-ci] |\n| [Metalsmith] | [Default Theme][metal-prev] | [Source on GitLab][metal-proj] | [Default][metal-ci] |\n| [GitBook] | [Default Theme][book-prev] | [Source on GitLab][book-proj] | [Default][book-ci] |\n\n### Environment: [Python]\n\n| SSG | Website URL | Project URL | Configuration |\n| --- | ----------- | ----------- | -------------- |\n| [Pelican] | [Default Theme][pelican-prev] | [Source on GitLab][pelican-proj] | [Default][pelican-ci] |\n| [Lektor] | [Default Theme][lektor-prev] | [Source on GitLab][lektor-proj] | [Default][lektor-ci] |\n| [Hyde] | [Default Theme][hyde-prev] | [Source on GitLab][hyde-proj] | [Default + `test` job][hyde-ci] |\n| [Nikola] | [Default Theme][nikola-prev] | [Source on GitLab][nikola-proj] | [Default][nikola-ci] |\n\n### Environment: [Go Lang][go]\n\n| SSG | Website URL | Project URL | Configuration |\n| --- | ----------- | ----------- | -------------- |\n| [Hugo] | [Beautiful Hugo Theme][hugo-prev] (Default) | [Source on GitLab][hugo-proj] | [Default][hugo-ci] |\n
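\nAs referenced above, here is a minimal sketch of such a `.gitlab-ci.yml`, assuming a Jekyll site built with Bundler; the Ruby image tag and build commands are illustrative, and each SSG in the tables needs its own install and build steps (follow the \"Configuration\" links for the real files):\n\n```yaml\nimage: ruby:2.3      # illustrative environment for the SSG\n\npages:               # the job must be named \"pages\"\n  script:\n    - bundle install\n    - bundle exec jekyll build -d public\n  artifacts:\n    paths:\n      - public       # GitLab Pages serves whatever lands here\n```\n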
\n### More Examples\n{: #groups}\n\nOn the following GitLab groups you can find even more examples.\n\n| Group | Environment | SSGs |\n| ----- | ----------- | ---- |\n| [Pages][ci-examples] (Official) | Ruby, Node, Python, etc. | All SSGs presented in this post |\n| [Jekyll Themes][jekyll-examples] | Ruby | Jekyll |\n| [Middleman Themes][middle-examples] | Ruby | Middleman |\n| [Themes and Templates][themes-templates] | Miscellaneous | Miscellaneous |\n\n**Note:** these themes, templates, and SSGs were casually chosen and listed in this post to provide you with\nsome distinct GitLab CI configurations.\n{: .note}\n\n## FAQ: which SSG should I get started with?\n\nThis is a tricky question, and there is no easy answer for it. Perhaps the best way\nto choose an SSG is to install three or four of them locally and give them a try. [This list][ssgs-more] might help too.\n\nHowever, if you don't know where to start, and you have never used any Static Site Generator\nbefore, I suggest you get started with [Jekyll]. Why?\n\n- It's very well documented\n- If you search the web for information you'll find plenty\n- Its template engine is rigid, meaning there's little chance of messing up the code\n- It is easy to learn\n\nBut this was merely a suggestion. There are better ways to choose. For example,\nif you are a programmer, you could choose the SSG according to the language it's\nwritten in (Ruby, JavaScript, Python, etc.), picking one you're familiar with.\nIf you are a PHP developer, you might want to choose an SSG with a PHP template engine. And so on.\n\n## Getting Help\n\nIf you need some help regarding GitLab Pages on GitLab.com,\nfeel free to use one of [our channels][get-help]. You can also\nopen an issue on the [Pages][pages-issues] group.\n\n## Conclusion\n\nWith this post we wrap up this series on SSGs. Hopefully, we got you inspired to start working with Static Site\nGenerators, and by now you comprehend what they are and how they work.\n\nYou are more than welcome to contribute new SSGs, or your cool themes and\ntemplates, to the [groups](#groups) mentioned earlier.\n\nIf you already work with an SSG, please let us know which one you prefer (click on the image below).\nIt's a quick survey that will only take a minute, and your participation means a lot to us! [Share it] with your friends too!\n\n[![Survey - SSGs on GitLab Pages](https://about.gitlab.com/images/blogimages/ssg-gitlab-pages-series/part-3-survey-ssgs-on-gitlab-pages.png)][CTA]\n\nDon't you have an account on [GitLab.com][sign-up] yet?\n
Let's create one!\nRemember, we can use GitLab Pages to [build any SSG][post-pages] for us and host it for free on GitLab.com!\n\nFollow [@GitLab][twitter] on Twitter and stay tuned for updates!\n\n\u003C!-- Cover photo: https://unsplash.com/photos/bphc6kyobMg -->\n\n\u003C!-- IDENTIFIERS -->\n\n\u003C!-- SSGs series -->\n\n[part-1]: /blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static/\n[part-2]: /blog/ssg-overview-gitlab-pages-part-2/\n\n\u003C!-- GitLab -->\n\n[CTA]: http://goo.gl/forms/9CKIkYqa4xQup1DF3\n\n[about-gitlab-com]: /\n[ci-for-pages]: /blog/gitlab-pages-setup/#gitlab-ci\n[your contribution]: https://gitlab.com/pages/pages.gitlab.io/blob/master/CONTRIBUTING.md\n[get-help]: /get-help/\n[gitlab-com]: /gitlab-com/\n[pages]: https://pages.gitlab.io\n[pages-ee]: http://doc.gitlab.com/ee/pages/README.html\n[pages-issues]: https://gitlab.com/pages/pages.gitlab.io/issues\n[post-pages]: /blog/gitlab-pages-setup/\n[Share it]: https://twitter.com/intent/tweet?hashtags=GitLabPages&original_referer=http%3A%2F%2Flocalhost%3A4567%2F2016%2F06%2F17%2Fssg-overview-gitlab-pages-part-3-examples-ci%2F&ref_src=twsrc%5Etfw&related=gitlab&text=SSGs%20on%20GitLab%20Pages&tw_p=tweetbutton&url=http%3A%2F%2Fgoo.gl%2Fforms%2F9CKIkYqa4xQup1DF3&via=gitlab\n[sign-up]: https://gitlab.com/users/sign_in \"Sign Up!\"\n[twitter]: https://twitter.com/gitlab\n\n\u003C!-- SSGs -->\n\n[blosxom]: http://blosxom.sourceforge.net/\n[Brunch]: http://brunch.io/\n[GitBook]: https://www.gitbook.com/\n[Harp]: http://harpjs.com/\n[Hexo]: https://hexo.io/\n[Hyde]: http://hyde.github.io/\n[Hugo]: https://gohugo.io/\n[Jekyll]: https://jekyllrb.com\n[Lektor]: https://www.getlektor.com/\n[Metalsmith]: http://www.metalsmith.io/\n[Middleman]: https://middlemanapp.com/\n[Nanoc]: https://nanoc.ws/\n[Nikola]: https://getnikola.com/\n[Octopress]: http://octopress.org/\n[Pelican]: http://blog.getpelican.com/\n\n[hexo-struc]: https://hexo.io/docs/setup.html\n[jekyll-struc]: https://jekyllrb.com/docs/structure/\n[middle-struc]: https://middlemanapp.com/basics/directory-structure/\n\n[jek-sitemap]: https://github.com/jekyll/jekyll-sitemap\n[middle-sitemap]: https://middlemanapp.com/advanced/sitemap/\n[hexo-sitemap]: https://github.com/hexojs/hexo-generator-sitemap\n\n[SSGs]: https://www.staticgen.com/\n[ssgs-more]: https://iwantmyname.com/blog/2014/05/the-updated-big-list-of-static-website-generators-for-your-site-blog-or-wiki\n\n\u003C!-- Languages, preprocessors, libraries and frameworks -->\n\n[animate.css]: https://daneden.github.io/animate.css/\n[Bootstrap]: http://getbootstrap.com\n[CoffeeScript]: http://coffeescript.org/\n[Foundation]: http://foundation.zurb.com/\n[go]: https://golang.org/\n[haml]: http://haml.info/\n[html5-boiler]: https://html5boilerplate.com/\n[jquery]: http://code.jquery.com/\n[kramdown]: http://kramdown.gettalong.org/\n[liquid]: https://shopify.github.io/liquid/\n[markdown]: https://en.wikipedia.org/wiki/Markdown\n[modernizr]: https://modernizr.com/\n[node]: https://nodejs.org/en/\n[normalize]: https://necolas.github.io/normalize.css/\n[Python]: https://www.python.org/\n[rdiscount]: http://dafoster.net/projects/rdiscount/\n[redcarpet]: https://github.com/vmg/redcarpet\n[redcloth]: http://redcloth.org/\n[Ruby]: https://www.ruby-lang.org/\n[Sass]: http://sass-lang.com/\n[skeleton]: http://getskeleton.com/\n[Slim]: http://slim-lang.com/\n[Stylus]: http://stylus-lang.com/\n[twig]: http://twig.sensiolabs.org/\n\n\u003C!-- Groups -->\n\n[ci-examples]: https://gitlab.com/groups/pages\n[jekyll-examples]: 
https://gitlab.com/groups/jekyll-themes\n[middle-examples]: https://gitlab.com/groups/middleman-themes\n[themes-templates]: https://gitlab.com/themes-templates\n\n\u003C!-- Examples -->\n\n[j-2-web]: https://jekyll-themes.gitlab.io/default-bundler/ \"The default Jekyll Theme\"\n[j-2-pro]: https://gitlab.com/jekyll-themes/default-bundler\n[j-2-ci]: https://gitlab.com/jekyll-themes/default-bundler/blob/master/.gitlab-ci.yml\n\n[j-3-web]: https://jekyll-themes.gitlab.io/grayscale/ \"A single page Jekyll template\"\n[j-3-pro]: https://gitlab.com/jekyll-themes/grayscale\n[j-3-ci]: https://gitlab.com/jekyll-themes/grayscale/blob/master/.gitlab-ci.yml\n\n[hugo-prev]: https://pages.gitlab.io/hugo/\n[hugo-proj]: https://gitlab.com/pages/hugo\n[hugo-ci]: https://gitlab.com/pages/hugo/blob/master/.gitlab-ci.yml\n\n[middle-prev]: https://middleman-themes.gitlab.io/middleman/\n[middle-proj]: https://gitlab.com/middleman-themes/middleman\n[middle-ci]: https://gitlab.com/middleman-themes/middleman/blob/master/.gitlab-ci.yml\n\n[hexo-prev]: https://themes-templates.gitlab.io/hexo/\n[hexo-proj]: https://gitlab.com/themes-templates/hexo\n[hexo-ci]: https://gitlab.com/themes-templates/hexo/blob/master/.gitlab-ci.yml\n\n[brunch-prev]: https://pages.gitlab.io/brunch/\n[brunch-proj]: https://gitlab.com/pages/brunch\n[brunch-ci]: https://gitlab.com/pages/brunch/blob/master/.gitlab-ci.yml\n\n[harp-prev]: https://pages.gitlab.io/harp/\n[harp-proj]: https://gitlab.com/pages/harp\n[harp-ci]: https://gitlab.com/pages/harp/blob/master/.gitlab-ci.yml\n\n[metal-prev]: https://pages.gitlab.io/metalsmith/\n[metal-proj]: https://gitlab.com/pages/metalsmith\n[metal-ci]: https://gitlab.com/pages/metalsmith/blob/master/.gitlab-ci.yml\n\n[lektor-prev]: https://pages.gitlab.io/lektor/\n[lektor-proj]: https://gitlab.com/pages/lektor\n[lektor-ci]: https://gitlab.com/pages/lektor/blob/master/.gitlab-ci.yml\n\n[hyde-prev]: https://pages.gitlab.io/hyde/\n[hyde-proj]: https://gitlab.com/pages/hyde\n[hyde-ci]: https://gitlab.com/pages/hyde/blob/master/.gitlab-ci.yml\n\n[nanoc-prev]: https://pages.gitlab.io/nanoc/\n[nanoc-proj]: https://gitlab.com/pages/nanoc\n[nanoc-ci]: https://gitlab.com/pages/nanoc/blob/master/.gitlab-ci.yml\n\n[pelican-prev]: https://pages.gitlab.io/pelican/\n[pelican-proj]: https://gitlab.com/pages/pelican\n[pelican-ci]: https://gitlab.com/pages/pelican/blob/master/.gitlab-ci.yml\n\n[book-prev]: https://pages.gitlab.io/gitbook/\n[book-proj]: https://gitlab.com/pages/gitbook\n[book-ci]: https://gitlab.com/pages/gitbook/blob/pages/.gitlab-ci.yml\n\n[octo-prev]: https://pages.gitlab.io/octopress/\n[octo-proj]: https://gitlab.com/pages/octopress\n[octo-ci]: https://gitlab.com/pages/octopress/blob/master/.gitlab-ci.yml\n\n[nikola-prev]: https://pages.gitlab.io/nikola/\n[nikola-proj]: https://gitlab.com/pages/nikola\n[nikola-ci]: https://gitlab.com/pages/nikola/blob/master/.gitlab-ci.yml\n",{"slug":8945,"featured":6,"template":678},"ssg-overview-gitlab-pages-part-3-examples-ci","content:en-us:blog:ssg-overview-gitlab-pages-part-3-examples-ci.yml","Ssg Overview Gitlab Pages Part 3 Examples 
Ci","en-us/blog/ssg-overview-gitlab-pages-part-3-examples-ci.yml","en-us/blog/ssg-overview-gitlab-pages-part-3-examples-ci",{"_path":8951,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8952,"content":8958,"config":8962,"_id":8964,"_type":16,"title":8965,"_source":17,"_file":8966,"_stem":8967,"_extension":20},"/en-us/blog/ssg-overview-gitlab-pages-part-2",{"title":8953,"description":8954,"ogTitle":8953,"ogDescription":8954,"noIndex":6,"ogImage":8955,"ogUrl":8956,"ogSiteName":692,"ogType":693,"canonicalUrls":8956,"schema":8957},"SSGs Part 2: What are modern static site generators","This is Part 2: Modern Static Site Generators, where we provide you with an overview on the subject.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684846/Blog/Hero%20Images/ssg-overview-gitlab-pages-cover.jpg","https://about.gitlab.com/blog/ssg-overview-gitlab-pages-part-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"SSGs Part 2: What are modern static site generators\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2016-06-10\",\n      }",{"title":8953,"description":8954,"authors":8959,"heroImage":8955,"date":8960,"body":8961,"category":14},[8399],"2016-06-10","\nWhat are Static Site Generators? What are they for? Why should I use them? Do they have\nlimitations? How can I use them with **GitLab Pages**?\n\nIf these questions ring a bell, this **series of posts** is for you! We are preparing\nthree articles around the same theme \"**Static Site Generators (SSGs)**\".\n\nThis is **Part 2: Modern Static Site Generators**, where we provide you with an overview on\nthe subject.\n\nThe previous post was [**Part 1: Dynamic x Static Websites**][part-1], where we briefly explained\nthe differences between them, and their pros and cons.\n\nStay tuned for the next post: **[Part 3: Build any SSG site with GitLab Pages][part-3]**!\n\n**Note:** For this series, we assume you are familiar with web development, curious about\nStatic Site Generators, and excited to see your site getting deployed with GitLab Pages.\n{: .note}\n\n\u003C!-- more -->\n\n----------\n\n### What's in this overview?\n{:.no_toc}\n\n- TOC\n{: toc}\n\n----\n\n## Benefits of Modern Static Site Generators\n\nStatic Site Generators (**[SSGs]**) are software created to automate web development to\n**output** static sites from **dynamic** writing. So, we code dynamically and publish\nstatically. No pain, all gain.\n\nThe most fascinating thing of any SSG is the ability to code fast, save money (on web\nhosting), and incredibly [decrease the page loading time][page-load]\n(compared to server-side dynamic webpages). Also, if we have a lot of visitors at the same\ntime, our [static sites have less chance to crash][server-crash] due to server overload\n[than dynamic ones][site-down].\n\n**Note:** if you want to know more about it, read the introductory article for this series:\n\"[SSGs Part 1: Static x Dynamic Websites][part-1]\".\n{: .note}\n\n## Structure of SSGs\n\nThe structure of SSGs is a combination of features to make static sites development faster\nand less repetitive. 
Let's take a quick look at the list below, then describe them one by one.\n\n- Environment\n- Template engine\n- Markup language\n- Preprocessors\n- Directory structure\n\n### \u003Ci class=\"fas fa-terminal fa-fw\" style=\"color:rgb(226,67,41); font-size:.85em\">\u003C/i> Environment\n{: #environment}\n\nThe **environment**, also called **platform**, consists essentially of the [programming language]\nthe SSG was written in. It makes a difference in the configuration, customization, and performance\nof the SSG. Examples: [Ruby], [Python], [Node JS][node].\n\n\u003Ca name=\"template-engine\">\u003C/a>\n\n### \u003Ci class=\"fas fa-cogs fa-flip-horizontal fa-fw\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i> Template engine\n{: #template_engine}\n\nIt is very important that we understand the **template engine**, since all the dynamic structure of our sites\nwill depend on it. It's essential that we choose an SSG with a [templating system][template-sys]\nthat we can use comfortably. Examples: [Liquid], [Haml] and [Slim] (Ruby), [Twig] (PHP),\n[Swig] (JavaScript).\n\nTo give you a picture, let's see an example of an HTML file in which we are using the\n[Liquid Templating Engine][liquid]:\n\n```html\n\u003C!DOCTYPE html>\n\u003Chtml lang=\"en\">\n\t{% include head.html %}\n\u003Cbody>\n\t{% include header.html %}\n\t\u003Cmain class=\"content\">\n\t\t{{ content }}\n\t\u003C/main>\n\t{% include footer.html %}\n\u003C/body>\n\u003C/html>\n```\n\nAs you may have guessed, we have three files for the content that **repeats** sitewide (head, header\nand footer), which are included in every page using this template. The only thing that is different\nis the `{{ content }}` of that page, which is written in a separate file, and also included\ndynamically into the template with this tag. Finally, all the files will be **compiled** to regular\nHTML pages **before** being stored on the web server. This process is called **build**. GitLab Pages\n**builds** any SSG.\n\n_Advantages over flat HTML_\n\n- Minimize typing errors (\"typos\"): all files are considerably reduced, improving readability\n- Avoid repetition: every block repeated sitewide is included in every page the same way\n- Update faster: if we change something in the file `footer.html`, it will affect the entire site\n\n### \u003Ci class=\"fas fa-pencil-alt fa-flip-horizontal fa-fw\" style=\"color:rgb(226,67,41); font-size:.85em\">\u003C/i> Markup language\n{: #markup-language}\n\nA **[markup language][Markup language]** is a system for writing documents in a way that makes them syntactically distinguishable\nfrom plain text. [Lightweight markup languages][wiki-markup] have a simplified and unobtrusive syntax, designed to be\neasily written within any text editor. That's what we'll use to write our content.\n\nThe majority of SSGs use **markdown engines** for this purpose, but other\nlightweight markup languages are also commonly used, such as [AsciiDoc], [Textile] and [ReStructuredText].\n\nAmong the SSGs that use markdown, we can generally choose which markdown engine\nwe want to use. It is set in the site configuration.\nFor example, in Ruby there are a handful of Markdown implementations:\n[Kramdown], [RDiscount], [Redcarpet], [RedCloth].\n
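\nA hedged illustration: in a Jekyll site, this choice is a one-line setting in `_config.yml`; the engine named below is just one of the options mentioned above:\n\n```yaml\n# _config.yml — site configuration (Jekyll shown as an example)\nmarkdown: kramdown   # or rdiscount, redcarpet, etc.\n```\n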
\nA blog **post** or a **page** written in [markdown] will most likely start with a **front matter**\nsection containing information about that page or post, and then comes the content just below it.\nThis is an `example.md` file used in a [Jekyll] site, and also an `example.html.md` file for\na [Middleman] site:\n\n```markdown\n---\n# front matter (between three-dashes block)\ntitle: \"Hello World\" # post or page title\ndate: YYYY-MM-DD HH:MM:SS # date and time, e.g. \"2016-04-30 11:00:00\"\nauthor: \"Foo Bar\" # a common variable to exemplify\n---\n\n# An h1 heading\n\nSome text.\n```\n\nThe front matter variables, which are `title`, `date` and `author` for our example above,\ncan be called with template tags all over the site. With Liquid, if we write:\n\n```liquid\n\u003Ch2>Title: {{ page.title }}\u003C/h2>\n\u003Cp>Date: {{ page.date }}\u003C/p>\n\u003Cp>By {{ page.author }}\u003C/p>\n```\n\nThe output would be:\n\n```html\n\u003Ch2>Title: Hello World\u003C/h2>\n\u003Cp>Date: 2016-04-30 11:00:00\u003C/p>\n\u003Cp>By Foo Bar\u003C/p>\n```\n\nThe content for our example would output simply:\n\n```html\n\u003Ch1>An h1 heading\u003C/h1>\n\u003Cp>Some text.\u003C/p>\n```\n\n### \u003Ci class=\"fas fa-puzzle-piece fa-fw\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i> Preprocessors\n{: #preprocessors}\n\nThe **preprocessors** are made to speed up our development process too. They simplify\nthe way we code, and then compile their own files into standard ones. Examples: [Sass]\nand [Stylus] for CSS, [CoffeeScript] for JavaScript.\n\nAgain, just to give you a picture, compare a code block written directly in CSS with\nthe same one written in Sass:\n\nCSS:\n\n```css\nh1 {\n  color: #333;\n  padding-top: 30px;\n}\np {\n  color: #333;\n}\n```\n\nSass:\n\n```sass\n$clr: #333\nh1\n  color: $clr\n  padding-top: 30px\np\n  color: $clr\n```\n\nIn large-scale styling, saving all the curly brackets `{ }` and semicolons `;` makes a lot\nof difference for whoever is typing. Also, with Sass variables (e.g., `$clr` above), we can\ndefine some standards and apply them all over our stylesheets. In the end, everything\nwill be compiled to regular CSS. There are more interesting features and advantages of\npreprocessors, but they're not the focus of this post.\n\nBy the way, the given Sass example compiles exactly to the CSS code above it.\n\n### \u003Ci class=\"far fa-folder-open fa-fw\" style=\"color:rgb(226,67,41); font-size:.85em\">\u003C/i> Directory structure\n{: #directory-structure}\n\nThe **directory structure** is different for each SSG. It's important to study the file\ntree before we start working with an SSG, otherwise we might face odd build errors that\nare hard to understand, solely because we didn't follow its structure.\nExamples: [Hexo structure][hexo-struc], [Middleman structure][middle-struc],\n[Jekyll structure][jekyll-struc]. So, just make sure you add new files to the correct directories.\n\n## SSGs built-in features\n\nIn addition to their standard components, there are also a number of built-in features\nthat make building and previewing static sites easier - and faster.\n
For example:\n\n- Most SSGs have a pre-installed server for previewing the sites locally\n- Some of them also ship with a LiveReload plugin, so we\ndon't need to refresh the page in our browser every time we save a file\n- Most of them provide us with built-in compilers for their supported preprocessors\n\n## Blog-Aware SSGs\n\nOne of the most attractive features of the majority of modern SSGs is the ability to manage\nblog content without needing to store posts, or post contents,\nin databases or in server-side-only processed files.\n\nA blog-aware website generator will create blog-style content, such as lists of content in\nreverse chronological order, archive lists, and other common blog-style features.\nHow would an SSG do that?\n\nWith their file tree and their template engine. The file tree defines the specific\ndirectory for `posts` and the template engine calls the posts dynamically.\n\nWith a `for` loop through the posts, they can be displayed in a single page, as\nillustrated below (with [Liquid]):\n\n```liquid\n  \u003Cul>\n    {% for post in site.posts %}\n      \u003Cli>\n        \u003Cspan>{{ post.date }}\u003C/span>\n        \u003Ch2>\n          \u003Ca class=\"post-link\" href=\"{{ post.url }}\">{{ post.title }}\u003C/a>\n        \u003C/h2>\n      \u003C/li>\n    {% endfor %}\n  \u003C/ul>\n```\n\nThis code means that, **for each post** within the **site posts**\n(`{% for post in site.posts %}`), each one is displayed as an item of an\nunordered list of posts, with a link to its respective path.\n\nOf course, we can adapt the HTML structure according to our needs. Also, we can use\nthe blog-aware structure to create different kinds of dynamic insertion. For example,\nwe could use it to display multiple things within the same category, such as a collection\nof photos, books, etc. So, each time we add a new item, the SSG uses its template\nengine to bring our collections together.\n\n## Supported content\n\nStatic servers fully support any language or script interpreted by browsers, known as\n[**client-side** processing][part-1]. Let's just remember that a static site is essentially\ncomposed of three components: the structure (HTML), the layout and styles (CSS),\nand the behavior (JavaScript).\n\n_Supported languages and file extensions_\n\n- Common file extensions: `.html` / `.css` / `.js` / `.xml` / `.pdf` / `.txt`\n- Common media files: [images], [audio], [video], [SVG]\n\n_Supported interactive services (examples)_\n\n- Commenting Systems (e.g., [Disqus], [Facebook Comments], and [many others][comment-systems])\n- Live Chat (e.g., [JivoChat], [Tawk.to])\n- [PayPal Payments Standard]\n- [Facebook Social Plugins]\n- [Twitter Kit]\n- Google Apps (e.g., [Analytics], [Adwords], [AdSense], etc.)\n- Site Search Engine (e.g., [Google Search][google-cse], [Swiftype], [Tipue])\n- Mailing lists and blog subscriptions (e.g., [MailChimp])\n\n_Supported utilities (examples)_\n\n- HTML/CSS/JS frameworks and libraries. E.g., [Bootstrap], [Foundation], [Normalize], [Modernizr], [Skeleton], [jQuery], [HTML5 Boilerplate][html5-boiler], [Animate.css]\n- [Schema.org] markup, helping [search engines][schema-seo] understand our site content better. This is [one of the numerous SEO][seo] techniques\n- [Sitemaps], important for [SEO][seo-sitemaps] too. E.g., [Jekyll Sitemap plugin][jek-sitemap], [Middleman Sitemap][middle-sitemap], [Hexo Sitemap plugin][hexo-sitemap]\n\n## Limitations of SSGs\n\nWe've just described what we **can do** with SSGs.\n
Now let's see what we **cannot**.\n\n- Register users\n- Have admin access\n- Send emails via a server-side `mail()` function\n- Use any server-side language or script\n\nThese kinds of actions depend necessarily on server-side processing, which is not handled\nby static-only web servers, as we explained in the [first post of this series][part-1].\n\n### Overcoming the limitations\n\n_User Authentication_\n\nDespite not having the ability to register users, nor having admin access for ourselves,\nwith tools like [Firebase] we can power up our static site with\n[user authentication][firebase-user-auth]. Find more [cool stuff][firebase-cool-stuff] here,\nfrom the same source.\n\n_Content management_\n\nWe can edit the content of our SSGs directly from the web browser with [Teletext.io]. We can't\ncreate new pages, but we can edit pages' content easily. Follow the [Teletext.io tutorial] to learn\nhow to implement this for your own website.\n\n_Contact Forms_\n\nYes, we can offer contact forms in our static websites. We can't process the **server-side**\nscript on our static server, but there are some third-party services we can use for that.\nFor example, you can try [Formspree], [FormKeep], [Wufoo], [FoxyForm], [Google Forms] or any\nother related service. However, if you want to take control over your mail script, you can\ntry the [parse method with SendGrid][sendgrid-parse].\n\n_JavaScript disabled_\n\nAnything based on JavaScript can be added to our static sites. However, if\nJavaScript is disabled on the user's browser, those scripts will not work. But there is\nsomething we can do to minimize this issue. We can add a [`\u003Cnoscript>`][no-script] tag\nto our web pages, containing a message that will be displayed only if JavaScript is disabled:\n\n```html\n\u003Cnoscript>Please enable JavaScript on your browser for a better experience with this website!\u003C/noscript>\n```\n\n## Conclusion\n\nHopefully now you understand the logic of Static Site Generators, how we can use them wisely,\nand what we can and cannot do with them. Dynamic websites are great, for sure.\nBut if we don't need all their functionality, SSGs are certainly wonderful alternatives.\n\nIn the [third post][part-3], which is the last chapter of this series, we will bring you a lot of examples\nof SSGs already running on GitLab Pages. Therefore, we're confident you'll be able to see and\nunderstand different GitLab CI configurations, and create your own.\n\nWe have already prepared a bunch of SSG example projects; you'll find them in the\n[GitLab Pages][ci-examples] official group. You are very welcome to [contribute][pages-contribute]\nnew SSGs.\n\nDon't you have an account on [GitLab.com][sign-up] yet? Let's create one!\n
Remember, we can\nuse GitLab Pages to [build any SSG][post-pages] for us and host it for free!\n\nFollow [@GitLab][twitter] on Twitter and stay tuned for updates!\n\n### Useful links\n\n- [GitLab Pages Quick Start][pages] - learn how to get started with GitLab Pages by forking an existing project\n- [GitLab Pages on GitLab.com][post-pages] - learn how to set up a GitLab Pages project from scratch\n- [GitLab Pages Docs][pages-ee] - the official documentation with all the details you might be interested in\n- [SSGs Part 1: Static vs Dynamic Websites][part-1] - the first post of this series\n- [SSGs Part 3: Build any SSG site with GitLab Pages][part-3] - the third post of this series\n\n\u003C!-- Cover image: https://unsplash.com/photos/6g0KJWnBhxg -->\n\n\u003C!-- IDENTIFIERS -->\n\n\u003C!-- Alphabetical, miscellaneous -->\n\n[part-1]: /blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static/\n[part-3]: /blog/ssg-overview-gitlab-pages-part-3-examples-ci/\n\n[AdSense]: https://support.google.com/adsense/answer/181950\n[Adwords]: https://support.google.com/adwords/answer/6331314\n[Analytics]: https://developers.google.com/analytics/devguides/collection/analyticsjs/\n[AsciiDoc]: https://en.wikipedia.org/wiki/AsciiDoc\n[audio]: http://www.w3schools.com/html/html5_audio.asp\n[comment-systems]: http://brianshim.com/webtricks/add-a-comment-wall-to-your-website/\n[Disqus]: https://disqus.com/\n[Facebook Comments]: https://developers.facebook.com/docs/plugins/comments\n[Facebook Social Plugins]: https://developers.facebook.com/docs/plugins\n[firebase]: https://www.firebase.com/\n[firebase-cool-stuff]: https://www.firebase.com/docs/web/examples.html\n[firebase-user-auth]: http://jsfiddle.net/firebase/a221m6pb/\n[FormKeep]: https://formkeep.com/\n[Formspree]: https://formspree.io/\n[foxyform]: http://www.foxyform.com/\n[google-cse]: https://support.google.com/customsearch/answer/4513751?hl=en&ref_topic=4513742&rd=1\n[Google Forms]: https://www.google.com/forms/about/\n[HTML5]: http://www.w3schools.com/html/html5_intro.asp\n[images]: http://vormplus.be/blog/article/using-images-in-your-html5-document\n[Jekyll]: https://jekyllrb.com\n[JivoChat]: https://www.jivochat.com/\n[MailChimp]: http://mailchimp.com/\n[Markup language]: https://en.wikipedia.org/wiki/Markup_language\n[no-script]: http://www.w3schools.com/tags/tag_noscript.asp\n[page-load]: https://www.smashingmagazine.com/2015/11/modern-static-website-generators-next-big-thing/#dynamic-websites-and-caching\n[PayPal Payments Standard]: https://developer.paypal.com/docs/classic/button-manager/integration-guide/SOAP/ButtonMgrOverview\n[programming language]: https://en.wikipedia.org/wiki/Programming_language\n[Schema.org]: http://schema.org/\n[schema-seo]: http://schema.org/docs/gs.html\n[sendgrid-parse]: https://sendgrid.com/blog/send-email-static-websites-using-parse/\n[SEO]: http://www.wordstream.com/blog/ws/2014/03/20/schema-seo\n[seo-sitemaps]: http://www.webconfs.com/importance-of-sitemaps-article-17.php\n[server-crash]: http://noahveltman.com/static-dynamic/\n[sitemaps]: https://support.google.com/webmasters/answer/156184?hl=en\n[site-down]: http://www.sitepoint.com/wordpress-vs-jekyll-might-want-make-switch/#2-wordpress-struggles-under-heavy-load\n[SSGs]: https://www.staticgen.com/\n[svg]: https://en.wikipedia.org/wiki/Scalable_Vector_Graphics\n[swiftype]: https://swiftype.com/\n[Tawk.to]: https://www.tawk.to/\n[teletext.io]: https://teletext.io/\n[teletext.io tutorial]: 
https://medium.com/teletext-io-blog/empower-your-static-generated-jekyll-site-with-instant-content-management-capabilities-82ce5569d7fb#.v2vo6pp2n\n[template-sys]: https://en.wikipedia.org/wiki/Web_template_system\n[tipue]: http://www.tipue.com/\n[Twitter Kit]: https://dev.twitter.com/web/overview\n[video]: http://www.w3schools.com/html/html5_video.asp\n[wiki-markup]: https://en.wikipedia.org/wiki/Lightweight_markup_language\n[Wufoo]: http://www.wufoo.com/\n\n\u003C!-- GitLab -->\n\n[get-help]: /get-help/\n[gitlab-com]: /gitlab-com/\n[pages]: https://pages.gitlab.io\n[pages-ee]: http://doc.gitlab.com/ee/pages/README.html\n[pages-issues]: https://gitlab.com/pages/pages.gitlab.io/issues\n[post-pages]: /blog/gitlab-pages-setup/\n[sign-up]: https://gitlab.com/users/sign_in \"Sign Up!\"\n[twitter]: https://twitter.com/gitlab\n\n\u003C!-- SSGs -->\n\n[hexo-struc]: https://hexo.io/docs/setup.html\n[jekyll-struc]: https://jekyllrb.com/docs/structure/\n[Middleman]: https://middlemanapp.com/\n[middle-struc]: https://middlemanapp.com/basics/directory-structure/\n\n[jek-sitemap]: https://github.com/jekyll/jekyll-sitemap\n[middle-sitemap]: https://middlemanapp.com/advanced/sitemap/\n[hexo-sitemap]: https://github.com/hexojs/hexo-generator-sitemap\n\n\u003C!-- Languages, preprocessors, libraries and frameworks -->\n\n[animate.css]: https://daneden.github.io/animate.css/\n[Bootstrap]: http://getbootstrap.com\n[CoffeeScript]: http://coffeescript.org/\n[Foundation]: http://foundation.zurb.com/\n[go]: https://golang.org/\n[haml]: http://haml.info/\n[html5-boiler]: https://html5boilerplate.com/\n[jquery]: http://code.jquery.com/\n[kramdown]: http://kramdown.gettalong.org/\n[liquid]: https://shopify.github.io/liquid/\n[markdown]: https://en.wikipedia.org/wiki/Markdown\n[modernizr]: https://modernizr.com/\n[node]: https://nodejs.org/en/\n[normalize]: https://necolas.github.io/normalize.css/\n[Python]: https://www.python.org/\n[rdiscount]: http://dafoster.net/projects/rdiscount/\n[redcarpet]: https://github.com/vmg/redcarpet\n[redcloth]: http://redcloth.org/\n[restructuredtext]: https://en.wikipedia.org/wiki/ReStructuredText\n[Ruby]: https://www.ruby-lang.org/\n[Sass]: http://sass-lang.com/\n[skeleton]: http://getskeleton.com/\n[Slim]: http://slim-lang.com/\n[Stylus]: http://stylus-lang.com/\n[textile]: https://en.wikipedia.org/wiki/Textile_(markup_language)\n[twig]: http://twig.sensiolabs.org/\n\n\u003C!-- Groups -->\n\n[ci-examples]: https://gitlab.com/groups/pages\n[jekyll-examples]: https://gitlab.com/groups/jekyll-themes\n[middle-examples]: https://gitlab.com/groups/middleman-themes\n[pages-contribute]: https://gitlab.com/pages/pages.gitlab.io/blob/master/CONTRIBUTING.md\n[themes-templates]: https://gitlab.com/themes-templates\n",{"slug":8963,"featured":6,"template":678},"ssg-overview-gitlab-pages-part-2","content:en-us:blog:ssg-overview-gitlab-pages-part-2.yml","Ssg Overview Gitlab Pages Part 2","en-us/blog/ssg-overview-gitlab-pages-part-2.yml","en-us/blog/ssg-overview-gitlab-pages-part-2",{"_path":8969,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8970,"content":8976,"config":8981,"_id":8983,"_type":16,"title":8984,"_source":17,"_file":8985,"_stem":8986,"_extension":20},"/en-us/blog/getting-started-with-gitlab-development-kit",{"title":8971,"description":8972,"ogTitle":8971,"ogDescription":8972,"noIndex":6,"ogImage":8973,"ogUrl":8974,"ogSiteName":692,"ogType":693,"canonicalUrls":8974,"schema":8975},"Getting Started with GitLab Development Kit","This post is helpful if you've considered developing 
a feature or fix for GitLab but are unsure how to set up a development environment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684698/Blog/Hero%20Images/getting-started-with-gitlab-development-kit.jpg","https://about.gitlab.com/blog/getting-started-with-gitlab-development-kit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting Started with GitLab Development Kit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Drew Blessing\"}],\n        \"datePublished\": \"2016-06-08\",\n      }",{"title":8971,"description":8972,"authors":8977,"heroImage":8973,"date":8979,"body":8980,"category":14},[8978],"Drew Blessing","2016-06-08","\n\n> This post is part of a series [Celebrating 1,000 Contributors][1k-post]\n\nGitLab is built on open source and has a thriving community. We appreciate all\nof our existing contributors and we look forward to welcoming new contributors,\nas well. This post is helpful if you've considered developing a feature or fix\nfor GitLab but are unsure how to set up a development environment.\n\nAs with any Rails application, GitLab has a few moving parts: a database, Redis,\nSidekiq, the Rails application server, GitLab Workhorse, and GitLab Shell. It\ncan be a challenge to configure each of these components on your own. That's why\nwe created the GitLab Omnibus packages for users, recently highlighted in\n[another blog post][omnibus-blog-post]. Perhaps not as well known is that we\nalso have the [GitLab Development Kit (GDK)][gdk] to improve the experience for\ndevelopers. In this post we'll go through the steps necessary to get GDK set up\non your workstation.\n\n\u003C!-- more -->\n\nAll of the details here were obtained from the GDK [README file][gdk-readme],\nwhich is comprehensive and should be your first resource when you have\nquestions.\n\n## Installing Prerequisites\n\nFirst, you need to install some prerequisite items. Every platform has\ndifferent requirements and we've outlined the steps for each in the\n['Prerequisites for all platforms'][gdk-prereq] section of the README. We have\ninstructions for Mac, Ubuntu, Arch Linux, Debian, Fedora, and CentOS/Red Hat.\nFor example, to install prerequisites for a Mac, run the following commands in\nTerminal:\n\n```bash\nbrew tap homebrew/dupes\nbrew tap homebrew/versions\nbrew install git redis postgresql libiconv icu4c pkg-config cmake nodejs go openssl node npm\nbundle config build.eventmachine --with-cppflags=-I/usr/local/opt/openssl/include\nnpm install phantomjs@1.9.8 -g\n```\n\n## Installation\n\nNext, clone the GDK repository:\n\n```bash\ncd /path/to/your/workspace\ngit clone git@gitlab.com:gitlab-org/gitlab-development-kit.git\ncd gitlab-development-kit\n```\n\nBefore configuring GDK, fork any GitLab repositories that you plan\nto contribute to. By default, GDK will install using the source repositories,\nsuch as `https://gitlab.com/gitlab-org/gitlab-ce.git`. Community members do not\nhave privileges in the main `gitlab-ce` project, so you will need a fork to\nsubmit merge requests.\n
Here is a list of various GitLab repositories you may\nwant to fork:\n\n- **GitLab CE** - [https://gitlab.com/gitlab-org/gitlab-ce](https://gitlab.com/gitlab-org/gitlab-ce)\n- **GitLab EE** - [https://gitlab.com/gitlab-org/gitlab-ee](https://gitlab.com/gitlab-org/gitlab-ee)\n- **GitLab Shell** - [https://gitlab.com/gitlab-org/gitlab-shell](https://gitlab.com/gitlab-org/gitlab-shell)\n- **GitLab Workhorse** - [https://gitlab.com/gitlab-org/gitlab-workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse)\n\nAfter forking any of the above repositories, you are ready to run the `make`\ncommand to install all components. Be sure to tell `make` about your forks.\nIf you chose not to fork one or more repositories, you can leave off the\ncorresponding argument and GDK will use the source repository.\n\n```bash\nmake gitlab_repo=git@gitlab.com:example/gitlab-ce.git gitlab_shell_repo=git@gitlab.com:example/gitlab-shell.git gitlab_workhorse_repo=git@gitlab.com:example/gitlab-workhorse.git\n```\n\nThe above `make` command installs and configures all components.\nThen, run `support/set-gitlab-upstream` to automatically add an upstream remote\nin each cloned GitLab component. This will ensure that upstream changes are\npulled in later when you run `make update`.\n\nStart GDK by executing `./run`. All components will be started and output will\nbe logged to the console. You can access GitLab in your browser at\n`http://localhost:3000` or press `Ctrl-C` to stop all processes.\n\n## Making changes\n\nThe various component repositories are all cloned inside the GDK directory.\nFor example, GitLab code is checked out in `$GDK_HOME/gitlab`. Change into\nthis directory and check out a new feature branch.\n\nAs you make changes you can refresh your browser to see the effects. In some\ncases, you may need to restart GDK to load the change. Restart by pressing\n`Ctrl-C` and then executing `./run` again.\n\n## Running tests\n\nSome changes will require writing tests, or running existing tests to ensure\nyou didn't break anything. GDK makes this very easy using the following commands:\n\n- `rake spinach` to run the spinach suite\n- `rake spec` to run the rspec suite\n- `rake gitlab:test` to run all the tests\n\nGitLab has a lot of tests and it can take a long time to run the full suite.\nUse the following command format to run tests in a single file:\n\n- `bundle exec rspec spec/controllers/commit_controller_spec.rb` for an rspec test\n- `bundle exec spinach features/project/issues/milestones.feature` for a spinach test\n\n## Opening a merge request\n\nIf all tests pass and you're ready to submit for review, commit the changes and\npush them to your fork. Then, visit GitLab.com and you should notice a banner\nnear the top. Click the blue 'Create Merge Request' button to initiate a merge\nrequest. This will create a merge request from your fork to the GitLab source\nproject so our team can review your contribution.\n\n![Last push widget](https://about.gitlab.com/images/gdk/last_push_widget.png)\n\nCongratulations! After installing the GitLab Development Kit you are\nwell-equipped to contribute to GitLab.\n
We're happy to welcome you to our\ncommunity and look forward to your future contributions.\n\nFor more information on contributing to GitLab, please see our\n[Contributing Guide][contrib-guide].\n\n[1k-post]: /2016/05/24/1k-contributors/\n[omnibus-blog-post]: /2016/03/21/using-omnibus-gitlab-to-ship-gitlab/\n[gdk]: https://gitlab.com/gitlab-org/gitlab-development-kit/\n[gdk-readme]: https://gitlab.com/gitlab-org/gitlab-development-kit/blob/master/README.md\n[gdk-prereq]: https://gitlab.com/gitlab-org/gitlab-development-kit/tree/master#prerequisites-for-all-platforms\n[contrib-guide]: https://gitlab.com/gitlab-org/gitlab-ce/blob/master/CONTRIBUTING.md\n",{"slug":8982,"featured":6,"template":678},"getting-started-with-gitlab-development-kit","content:en-us:blog:getting-started-with-gitlab-development-kit.yml","Getting Started With Gitlab Development Kit","en-us/blog/getting-started-with-gitlab-development-kit.yml","en-us/blog/getting-started-with-gitlab-development-kit",{"_path":8988,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":8989,"content":8995,"config":8999,"_id":9001,"_type":16,"title":9002,"_source":17,"_file":9003,"_stem":9004,"_extension":20},"/en-us/blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static",{"title":8990,"description":8991,"ogTitle":8990,"ogDescription":8991,"noIndex":6,"ogImage":8992,"ogUrl":8993,"ogSiteName":692,"ogType":693,"canonicalUrls":8993,"schema":8994},"SSGs Part 1: A Static vs Dynamic Website","This is Part 1: A Dynamic vs Static Website, where we go over their differences, pros and cons.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684818/Blog/Hero%20Images/part-1-static-x-dynamic-cover.jpg","https://about.gitlab.com/blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"SSGs Part 1: A Static vs Dynamic Website\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2016-06-03\",\n      }",{"title":8990,"description":8991,"authors":8996,"heroImage":8992,"date":8997,"body":8998,"category":14},[8399],"2016-06-03","\n**Static** vs **dynamic** websites: what is the difference? What are the advantages of one over another? Which ones can I use with **GitLab Pages**? What about **Static Site Generators**?\n\nIf these questions ring a bell, this **series of posts** is for you! We are preparing three articles around the same theme \"**Static Site Generators (SSGs)**\".\n\nThis is **Part 1: Dynamic vs Static Websites**, where we go over their differences, pros and cons.\n\nStay tuned for the next two posts:\n\n- **[Part 2: Modern Static Site Generators][part-2]**\n- **[Part 3: Build any SSG site with GitLab Pages][part-3]**\n\n**Note:** For this series, we assume you are familiar with web development, curious about Static Site Generators, and excited to see your site getting deployed with GitLab Pages.\n{: .note}\n\n\u003C!-- more -->\n\n----------\n\n### What's in this page?\n{:.no_toc}\n\n- TOC\n{: toc}\n\n----\n\n## A static vs dynamic website\n\nAt the core, the difference between static and dynamic websites is that a static website appears the same for every user that visits it. Static websites can only change when the source files are modified by a developer. A dynamic website, however, changes based on data from visitor behaviors and serves up a different look, feel, and content depending on the user.
\n\n### Static website\n\nA static website is a combination of HTML markup (the text we see written on web pages), CSS (Cascading Style Sheets), which are the styles and layouts applied to those pages, and JavaScript, a programming language that defines their behavior (e.g., fade in and fade out, hover effects, etc.). These pages are stored as simple files, usually on a [VPS][wiki-vps], which are then served by a web server. When we type the URL for such a web page into our web browser, the browser (called _client_) makes an _HTTP request_ to that server, which identifies which files are being requested and sends them back to our browsers via an _HTTP response_.\n\n#### Advantages of a static site\n\nStatic sites are simple. They’re a collection of basic files that are manually updated as needed. Static sites are generally built using HTML and CSS and they’re a common choice for new or small companies to get their presence on the web.\n\nAnd even though static sites can require more time on the backend, they can also be faster from a user perspective because they don’t undergo any changes when requested - they just are as they are.\n\n#### Disadvantages of a static site\n\nThough it can be an advantage, the simple style of a static site can also be a roadblock. The process of making changes to a given page is entirely manual because there’s no user interface or data processing to automate page changes. It can be time-consuming and repetitive, and far less scalable than a dynamic site.\n\nThe other major disadvantage of a static site is that it shows the same content to every visitor. That may work for certain page purposes, but content creation isn’t a one-size-fits-all scenario. Different content attracts and converts different visitors, so the same page for all is not always a good thing.\n\n### Dynamic website\n\nA dynamic website is more complicated than that. Besides the markup, the styles, and the behavior, it does things that our **web browsers** alone cannot process. For example, if you are buying something online, it's easy to understand that the prices and the availability of that item are _dynamically_ retrieved from data, generally stored in _databases_. This process of retrieving data and processing it _before_ responding to our browsers with web pages containing that information is called _server-side_ processing.\n\n#### Advantages of a dynamic site\n\nA dynamic site is more easily customizable, without nearly the amount of manual work a static site change requires. This type of site changes more fluidly based on a visitor’s geographic location, time zone, and other preferences. Web servers build dynamic site pages on demand, whenever a user requests a page.\n\nAfter that request, information is pulled by the server from one or more databases to custom build an HTML file that gets shipped back to the browser and becomes the page. No site visitor necessarily sees the same page as another, making the user experience more personalized.\n\n#### Disadvantages of a dynamic site\n\nAs opposed to the simplicity of a static site, a dynamic site can be a bit more complex to build and maintain due to its ever-changing nature. It may require a bit more developer knowledge or the help of an experienced developer to keep it updated, which can cost more in terms of learning or hiring.\n\nAlso, since the pages are more customized, the load time can be affected.\n
Now let's take a closer look at these processes to understand how they work, how important they are, and why this information is useful for us.\n\nHow about starting from the beginning?\n\n### A static vs dynamic website: the history\n\nAbout 25 years ago, in 1990, [Tim Berners-Lee][tim-bl] [published][first-site-1990] the [first website in history][first-website]. It was a plain [static webpage] with a few tags and links. Three years later, in 1993, the birth of the [dynamic web] took place, when the [Common Gateway Interface (CGI)][wiki-cgi] was [first introduced][first-cgi]. CGI was a way to let a website run scripts on the web server and display the output.\nFrom then on, the evolution was huge.\n\nWith the advent of server-side scripting came [Web Content Management Systems (WCMS)][wcms], allowing us to create and maintain databases connected to the internet. Websites with such server-side processing, which provide high-level interactivity with the user, are commonly referred to as [web applications][web-apps]. [GitLab] is one of them. Some notable examples are [WordPress], [Joomla!], [Drupal], [Magento], [Ghost], and [many others][cms-list].\n\nBesides connecting websites with databases, the dynamic web is an important asset for working with [template systems][template-sys]. By using them, developers write fast, update websites faster, and reduce mistakes (caused by repetition).\n\nUnfortunately, along with the growing popularity of server-side based websites came their [vulnerabilities][common-vulnerabilities]. [Security issues] are common among them, and there are a lot of [measures][security-web-apps] we need to take to prevent attacks of countless kinds. We need to protect our users, our site, and our server. Everything in between is subject to attack.\n\nAn intelligent counter-measure for avoiding those security threats and, at the same time, maintaining the benefits of templating systems, was the creation of **Static Site Generators (SSGs)**. With them, we write dynamically and publish statically.\n\nSSGs came out in the early 2000s, with [Blosxom] in 2003, and [WebGen] in 2004. In 2008, [Tom Preston-Werner][tom-pw] released [Jekyll], by far the [most popular SSG][ssgs-list] up to now. Interest in Static Site Generators has increased considerably in the last few years, as you can see in the chart below, from [Google Trends]:\n\n![Static Site Generators - Google Trends](https://about.gitlab.com/images/blogimages/ssg-gitlab-pages-series/part-1-ssg-google-trends.png)\n\n## Server processing: static vs dynamic web pages\n\nLet's take a look at the image below and see [how static pages and dynamic pages][static-x-dynamic-video] communicate with the web server.\n\n**Web server** software, such as [Apache], [NGINX] and [IIS], is only able to store and serve static files: HTML, CSS and JavaScript. **Application server** software, such as [PHP], [Cold Fusion] or [ASP.NET] to name a few, is the only kind able to interpret dynamic scripting.\n\nEvery browser (known as _client_) communicates with **web servers only**, via HTTP _(HyperText Transfer Protocol)_, with a URL _(Uniform Resource Locator)_.\n\n![Static vs Dynamic server processing](https://about.gitlab.com/images/blogimages/ssg-gitlab-pages-series/part-1-dynamic-x-static-server.png)\n\n**Scheme A:** the client (web browser) sends an **HTTP request** to the web server with a URL.\n
The HTML _(HyperText Markup Language)_ file requested, stored on the web server, is immediately returned to the client with an **HTTP response**, and its content is interpreted by the browser and then displayed to the user. This is known as _client-side processing_.\n\n**Scheme B:** the client sends an **HTTP request** to the **web server**, which dispatches the request to the **application server**. The application server may **request data from a database** first, and then **constructs the HTTP response** based on the data retrieved from the database. This response is passed back to the **web server**, which returns the HTML file, constructed by the application server, to the client, via **HTTP response**. This is called _server-side processing_.\n\nThe main difference is that dynamic webpages are not served as-is by the web server, as static pages are. They are constructed for every HTTP request sent by each client.\n\nThese additional steps, necessary for dynamic websites, increase the time for the user to receive the HTTP response from the server with the requested page (URL). And nobody likes waiting.\n\nDynamic websites also consume more server resources, as the same content needs to be constructed again and again for each HTTP request.\n\nThere's another main advantage of static over dynamic sites. Static pages don't process user data, circumventing a major security issue related to dynamic web applications: user privacy. If the users don't send any data to your server, there is no data to be stolen.\n\n## Conclusion\n\nFully-featured server providers (Scheme B) have the capability of processing server-side scripts for web applications. Their structure is more complex and naturally more expensive, whereas static web servers (Scheme A), which only handle static pages, can be maintained at a lower cost. With [GitLab Pages][pages] you can host your site for **free**.\n\nThe majority of web developers don't write plain static sites anymore: they take a lot more time, both to write and to update, than dynamic ones. But, as noted earlier, SSGs solve this problem. We can code dynamically and the SSG outputs only static webpages for us. That's the content uploaded to our web server, in this particular case, **GitLab Pages**, which runs on NGINX.\n\nStay tuned for the next article of this series, in which we will provide you with an overview of **[Modern Static Site Generators][part-2]**, explaining how they work, what they support, and why we should really consider using SSGs for our sites.\n\nSee you there!\n\nDon't you have an account on [GitLab.com][sign-up] yet?\n
Let's create one!\n\nFollow [@GitLab][twitter] on Twitter and stay tuned for updates!\n\n\u003C!--\nCover image: https://pixabay.com/en/ball-http-www-crash-administrator-63527/\nOther images:\nApp server: https://pixabay.com/en/computer-database-network-server-156948/\nWeb server: https://pixabay.com/en/computer-network-proxy-server-156950/\nDatabase: https://pixabay.com/en/database-data-storage-information-309919/\nMan at the computer: http://publicdomainvectors.org/en/free-clipart/Vector-illustration-of-man-at-computer/3839.html\nIllustration (Static x Dynamic Websites): Marcia Ramos for GitLab, Inc.\n-->\n\n\u003C!-- IDENTIFIERS -->\n\n[part-2]: /blog/ssg-overview-gitlab-pages-part-2/\n[part-3]: /blog/ssg-overview-gitlab-pages-part-3-examples-ci/\n\n\u003C!-- Alphabetical, miscellaneous -->\n\n[blosxom]: http://blosxom.sourceforge.net/\n[cms-list]:  https://en.wikipedia.org/wiki/List_of_content_management_systems\n[common-vulnerabilities]:  https://www.toptal.com/security/10-most-common-web-security-vulnerabilities\n[dynamic web]:  https://en.wikipedia.org/wiki/Dynamic_web_page\n[first-cgi]:  https://www.pingdom.com/blog/a-history-of-the-dynamic-web/\n[first-site-1990]:  http://www.telegraph.co.uk/technology/internet/12061803/The-worlds-first-website-went-online-25-years-ago-today.html\n[first-website]: http://info.cern.ch/hypertext/WWW/TheProject.html\n[GitLab]: / \"GitLab.com, GitLab CE, GitLab EE\"\n[google trends]: //www.google.com.br/trends/explore?hl=en-US#q=%22static+site+generator%22&cmpt=q&tz=Etc/GMT%2B3&tz=Etc/GMT%2B3\n[Jekyll]: https://jekyllrb.com\n[security issues]: https://www.cs.columbia.edu/~smb/classes/f06/l09.pdf\n[security-web-apps]: https://msdn.microsoft.com/en-us/library/zdh19h94.aspx\n[ssgs-list]: https://staticsitegenerators.net/\n[static webpage]: https://en.wikipedia.org/wiki/Static_web_page\n[static-x-dynamic-video]: https://www.youtube.com/watch?v=zC03bcuVZHY\n[template-sys]: https://en.wikipedia.org/wiki/Web_template_system\n[tim-bl]: https://en.wikipedia.org/wiki/Tim_Berners-Lee\n[tom-pw]: https://en.wikipedia.org/wiki/Tom_Preston-Werner\n[wcms]: https://en.wikipedia.org/wiki/Web_content_management_system\n[web-apps]: https://en.wikipedia.org/wiki/Web_application\n[webgen]: http://webgen.gettalong.org/news.html#webgen-0-1-0-released\n[wiki-cgi]:  https://en.wikipedia.org/wiki/Common_Gateway_Interface\n[wiki-vps]: https://en.wikipedia.org/wiki/Virtual_private_server \"Virtual Private Server\"\n\u003C!-- GitLab -->\n\n[pages]: https://pages.gitlab.io\n[sign-up]: https://gitlab.com/users/sign_in \"Sign Up!\"\n[twitter]: https://twitter.com/gitlab\n\n\u003C!-- Server software -->\n\n[Apache]: //www.apache.org/\n[NGINX]: https://www.nginx.com/\n[IIS]: //www.iis.net/\n[PHP]: //php.net/\n[Cold Fusion]: https://www.adobe.com/products/coldfusion/\n[ASP.NET]: http://www.asp.net/\n\n\u003C!-- CMS -->\n\n[drupal]: https://www.drupal.org/\n[ghost]: https://ghost.org/\n[joomla!]: https://www.joomla.org/\n[magento]: https://magento.com/\n[wordpress]: https://wordpress.org/\n",{"slug":9000,"featured":6,"template":678},"ssg-overview-gitlab-pages-part-1-dynamic-x-static","content:en-us:blog:ssg-overview-gitlab-pages-part-1-dynamic-x-static.yml","Ssg Overview Gitlab Pages Part 1 Dynamic X 
Static","en-us/blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static.yml","en-us/blog/ssg-overview-gitlab-pages-part-1-dynamic-x-static",{"_path":9006,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9007,"content":9012,"config":9016,"_id":9018,"_type":16,"title":9019,"_source":17,"_file":9020,"_stem":9021,"_extension":20},"/en-us/blog/gitlab-container-registry",{"title":9008,"description":9009,"ogTitle":9008,"ogDescription":9009,"noIndex":6,"ogImage":8792,"ogUrl":9010,"ogSiteName":692,"ogType":693,"canonicalUrls":9010,"schema":9011},"Introducing GitLab Container Registry","Built on open source software, GitLab Container Registry isn't just a standalone registry; it's completely integrated with GitLab.","https://about.gitlab.com/blog/gitlab-container-registry","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing GitLab Container Registry\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Pundsack\"}],\n        \"datePublished\": \"2016-05-23\",\n      }",{"title":9008,"description":9009,"authors":9013,"heroImage":8792,"date":9014,"body":9015,"category":14},[8525],"2016-05-23","\n\nYesterday [we released GitLab 8.8][8.8], super powering GitLab's built-in\ncontinuous integration. With it, you can build a pipeline in GitLab,\nvisualizing your builds, tests, deploys and any other stage of the life cycle of\nyour software. Today (and already in GitLab 8.8), we're releasing the next\nstep: GitLab Container Registry.\n\n## What is GitLab Container Registry?\n\nGitLab Container Registry is a secure and private registry for Docker images.\nBuilt on open source software,\nGitLab Container Registry isn't just a standalone registry;\nit's _completely_ integrated with GitLab.\n\nGitLab is all about having a single, integrated experience and our registry\nis no exception. You can now easily use your images for [GitLab CI](/topics/ci-cd/), create\nimages specific for tags or branches and much more.\n\nOur container registry is the first Docker registry that is\nfully integrated with Git repository management and comes out of the box with\nGitLab 8.8. So if you've upgraded, you already have it!\nThis means our integrated Container Registry requires no additional\ninstallation. It allows for easy upload and download of images\nfrom GitLab CI. And it's free.\n\nRead the [administration documentation](https://docs.gitlab.com/ee/administration/packages/container_registry.html) to learn how to enable it\non your GitLab instance. (This documentation covers everything from self-signed certificates to environment variables, garbage collect commands, various APIs, curl commands, setting rate limits, how to use an external registry, and more.)\n\n[8.8]: https://about.gitlab.com/releases/2016/05/22/gitlab-8-8-released/\n\n\u003C!-- more -->\n\n## Some Docker basics\n\nThe main component of a Docker-based workflow is an image, which contains\neverything needed to run an application. Images are often created automatically\nas part of continuous integration, so they are updated whenever code changes.\nWhen images are built to be shared between developers and machines, they need to\nbe stored somewhere, and that's where a container registry comes in.\n\nThe registry is the place to store (or host) and tag images for later use. Developers may\nwant to maintain their own private registry for private images, or for\nthrow-away images used only in testing. 
Using GitLab Container Registry means\nyou don't need to set up and administer yet another service, or use a public\nregistry.\n\n## Tight integration\n\nGitLab Container Registry is fully integrated with GitLab, making it easy for\ndevelopers to code, test, and deploy Docker container images using GitLab CI\nand other Docker-compatible tooling.\n\n- User authentication is from GitLab itself, so all the user and group\n  definitions are respected.\n- There's no need to create repositories in the registry; the project is already\n  defined in GitLab.\n- Projects have a new tab, **Container Registry**, which lists all images\n  related to the project.\n- Every project can have an image repository, but this can be turned off\n  per-project.\n- Developers can easily upload and download images from GitLab CI.\n- There's no need to download or install additional software.\n\n## How GitLab Container Registry can simplify your workflow\n\nGitLab Container Registry is seamless and secure.\nHere are some examples of how GitLab Container Registry can simplify your\ndevelopment and deployment workflows:\n\n- Easily build Docker images with the help of GitLab CI and store them in the\n  GitLab Container Registry.\n- Easily create images per branch, tag, or any other way suitable to your\n  workflow, and with little effort, store them on GitLab.\n- Use your own build images, stored in your registry, to test your applications\n  against them, simplifying the Docker-based workflow.\n- Let the team easily contribute to the images, using the same workflow they are\n  already accustomed to. With the help of GitLab CI you can automatically\n  rebuild images that inherit from yours, allowing you to easily deliver fixes\n  and new features to a base image used by your teams.\n- Have a full Continuous Deployment and Delivery workflow by pointing your\n  CaaS to use images directly from GitLab Container Registry. You'll be able to\n  perform automated deployments of your applications to the cloud\n  (Docker Cloud, Docker Swarm, Kubernetes and others) when you build and test\n  your images.\n\n## How to start using GitLab Container Registry\n\nFirst, ask your system administrator to enable GitLab Container Registry\nfollowing the [administration documentation][admin-docs].\n\nAfter that, you will be allowed to enable **Container Registry** for your project.\n\n![](https://about.gitlab.com/images/container-registry/project_feature.png)\n\nTo start using your brand new **Container Registry** you first have to log in:\n\n```\ndocker login registry.example.com\n```\n\nThen you can simply build and push images to GitLab:\n\n```\ndocker build -t registry.example.com/group/project .\ndocker push registry.example.com/group/project\n```\n\nGitLab also offers simple Container Registry management. Go to your project and click **Container Registry**.\nThis view will show you all tags in your repository and will allow you to delete them and view details about each tag, such as when it was published and how much storage it consumes.\n\n![](https://about.gitlab.com/images/container-registry/container_registry.png)\n\n> Read more in the [GitLab Container Registry user guide][user-docs].\n\n## Use with GitLab CI\n\nYou can use GitLab's integrated CI solution to build, push, and deploy your\ncontainer images.\n\n> **Note:** This feature requires GitLab Runner 1.2.\n\n> **Note:**\nTo use Docker in Docker images you need to have [the `privileged` flag][privflag]\nset up in your Runner's configuration. This is **not** the case for the shared\nRunners on GitLab.com for now; we plan to enable this flag next week. For the\nmoment you can use your own Runners; a sketch of the required change follows.\n\n
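Here's a minimal sketch of what enabling that flag can look like on a Runner you manage yourself. The file path below assumes a default Linux installation of GitLab Runner with the Docker executor; adjust it to your setup:\n\n```\n# In /etc/gitlab-runner/config.toml, under the [runners.docker] section, set:\n#\n#   privileged = true\n#\n# then restart the Runner so the change takes effect:\nsudo gitlab-runner restart\n```\n\n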
Here's an example GitLab CI configuration file (`.gitlab-ci.yml`) which builds\nan image, runs tests, and, if the tests are successful, tags the build and\nuploads it to the container registry:\n\n```yaml\nbuild_image:\n  image: docker:git\n  services:\n  - docker:dind\n  script:\n    - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN registry.example.com\n    - docker build -t registry.example.com/my-group/my-project .\n    - docker run registry.example.com/my-group/my-project /script/to/run/tests\n    - docker push registry.example.com/my-group/my-project:latest\n  only:\n    - master\n```\n\nHere's a more elaborate example that splits up the tasks into four stages,\nincluding two tests that run in parallel. The build is stored in the container\nregistry and used by subsequent stages, downloading the image automatically\nwhen needed. Changes to `master` also get tagged as `latest` and deployed using\nan application-specific deploy script:\n\n```yaml\nimage: docker:git\nservices:\n- docker:dind\n\nstages:\n- build\n- test\n- release\n- deploy\n\nvariables:\n  CONTAINER_TEST_IMAGE: registry.example.com/my-group/my-project:$CI_BUILD_REF_NAME\n  CONTAINER_RELEASE_IMAGE: registry.example.com/my-group/my-project:latest\n\nbefore_script:\n  - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN registry.example.com\n\nbuild:\n  stage: build\n  script:\n    - docker build -t $CONTAINER_TEST_IMAGE .\n    - docker push $CONTAINER_TEST_IMAGE\n\ntest1:\n  stage: test\n  script:\n    - docker run $CONTAINER_TEST_IMAGE /script/to/run/tests\n\ntest2:\n  stage: test\n  script:\n    - docker run $CONTAINER_TEST_IMAGE /script/to/run/another/test\n\nrelease-image:\n  stage: release\n  script:\n    - docker pull $CONTAINER_TEST_IMAGE\n    - docker tag $CONTAINER_TEST_IMAGE $CONTAINER_RELEASE_IMAGE\n    - docker push $CONTAINER_RELEASE_IMAGE\n  only:\n    - master\n\ndeploy:\n  stage: deploy\n  script:\n    - ./deploy.sh\n  only:\n    - master\n```\n\n## Summary\n\nGitLab Container Registry is the latest addition to GitLab's integrated set of\ntools for the software development lifecycle and comes with\n[GitLab 8.8 and up][8.8]. With GitLab Container Registry,\ntesting and deploying Docker containers has never been easier.\nGitLab Container Registry is available on-premises in GitLab CE and GitLab EE\nat no additional cost and installs in the same infrastructure as the rest of\nyour GitLab instance.\n\nContainer Registry is enabled on GitLab.com, the pricing is simple (it's completely free), and you can start using it right now!\n\n> **Note:**\nTo use Docker in Docker images you need to have [the `privileged` flag][privflag]\nset up in your Runner's configuration. This is **not** the case for the shared\nRunners on GitLab.com for now. 
We plan to enable this flag next week.\n\n[8.8]: /releases/2016/05/22/gitlab-8-8-released/\n[user-docs]: https://docs.gitlab.com/ee/user/packages/container_registry/\n[admin-docs]: http://docs.gitlab.com/ee/administration/container_registry.html\n[privflag]: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/executors/docker.md#use-docker-in-docker-with-privileged-mode\n",{"slug":9017,"featured":6,"template":678},"gitlab-container-registry","content:en-us:blog:gitlab-container-registry.yml","Gitlab Container Registry","en-us/blog/gitlab-container-registry.yml","en-us/blog/gitlab-container-registry",{"_path":9023,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9024,"content":9030,"config":9034,"_id":9036,"_type":16,"title":9037,"_source":17,"_file":9038,"_stem":9039,"_extension":20},"/en-us/blog/getting-started-gitlab-and-shippable",{"title":9025,"description":9026,"ogTitle":9025,"ogDescription":9026,"noIndex":6,"ogImage":9027,"ogUrl":9028,"ogSiteName":692,"ogType":693,"canonicalUrls":9028,"schema":9029},"Getting Started with GitLab and Shippable Continuous Integration"," To know more about how to set up Shippable CI with GitLab, read on.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672330/Blog/Hero%20Images/1-aye_aye_gitlab.png","https://about.gitlab.com/blog/getting-started-gitlab-and-shippable","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting Started with GitLab and Shippable Continuous Integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2016-05-05\",\n      }",{"title":9025,"description":9026,"authors":9031,"heroImage":9027,"date":9032,"body":9033,"category":14},[890],"2016-05-05","\n\n_This tutorial is a re-post of [Shippable's blog post](http://blog.shippable.com/getting-started-gitlab-with-shippable-ci)._\n\nGitLab is a fast-growing choice for enterprises managing their application code and team\ncollaboration both on-premises and in the cloud. Today we are excited to announce Shippable\nsupport for application delivery pipelines for GitLab developers.\n\nShippable now extends [GitLab](/)'s combination of Git-based source code repositories and enterprise \nfeatures such as authentication and security with CI/CD pipelines. Shippable connects with \nGitLab self-managed community and enterprise editions as well as their cloud offering so we \nwork the way you want to. \n\n\u003C!-- more -->\n\n> \"We're excited that Shippable now integrates with GitLab. With Shippable anyone using GitLab \ncan now easily deploy their code, independent of their stack and cloud environment.\" says Job van der Voort \nVP of Product at GitLab. \"Like GitLab, Shippable works with established technologies, as well as with \ncontainers, making this an excellent solution for organizations of any size to bring their code from \ntheir repositories in GitLab to production.\"\n\nOur new Shippable integration for GitLab supports running builds for your push commits, merge requests \nand even show the build status in GitLab, so you can enjoy the power of Shippable within GitLab. To know \nmore about how to set up Shippable CI with GitLab, read on.\n\n---\n\n## Setting up GitLab account integration\n\nTo start off, [sign into Shippable](https://app.shippable.com/). 
(GitLab.com identity support coming soon.)\nOnce signed into Shippable:\n\n* Click on the gear icon for 'Account Settings' in the top right navigation bar\n* Click on the 'Integrations' tab\n* Click on 'Add Integration'\n\n![Add integration in account settings](https://about.gitlab.com/images/blogimages/shippable-blog-images/2-Add_Integration_in_Account_settings.png)\n\n* Select the drop down under the 'Master Integration' & select 'GitLab'\n* For the 'Integration Name', use a distinctive name that's easy to recall\n* For 'url', enter the API endpoint of your GitLab instance; this is usually of the format [https://your-gitlab.com/api/v3](https://your-gitlab.com/api/v3). If you're using GitLab.com, this will be [https://gitlab.com/api/v3](https://gitlab.com/api/v3).\n* For 'token', navigate to your GitLab Profile Settings in your GitLab instance and copy the Private Token provided under Account.\n\n![Account integration](https://about.gitlab.com/images/blogimages/shippable-blog-images/3-acc_int.png)\n\n* Click save.\n\nNext, let's sync the account to ensure the permissions are up to date. To do this, go to the 'Accounts' \ntab & click 'Sync'.\n\n![Account sync](https://about.gitlab.com/images/blogimages/shippable-blog-images/4-accountSync.png)\n\nOnce synced, your GitLab subscription should be available from the 'Subscriptions' drop down on \nthe dashboard page. You can now proceed to enable any repository (project) and configure your\n[Continuous Integration](http://docs.shippable.com/ci_configure/).\n\n## Build triggers\n\nShippable automatically triggers build runs for all GitLab code commits or merge requests, independent \nof the user who initiated them. Once a build triggers, you can view the [build status and build details](http://docs.shippable.com/ci_builds/). In addition, Shippable integrates with GitLab's build status API and displays the build status directly in GitLab, as shown below.\n\n![Build status in GitLab](https://about.gitlab.com/images/blogimages/shippable-blog-images/5-example.png)\n\n## Get a build going\n\nTo get a build going for a project, [add the Shippable config](http://docs.shippable.com/ci_configure/) to it and then proceed to enable the project \nfrom your subscription in Shippable. Any build failures are reported instantly via email and in GitLab's \nbuild status. Slack, HipChat, and IRC notifications can also be [configured](http://docs.shippable.com/int_notifications/).\n\nYou can even proceed to configure [Continuous Delivery](http://docs.shippable.com/pipelines_overview/) using Shippable Pipelines and Docker. With Continuous Integration\nensuring that features work as intended and Continuous Delivery automatically deploying successful builds, \nyou can ensure a smooth, fast, bug-free transition from development to production.\n\n---\n\nAt GitLab, we are always excited to integrate with products that our customers use. 
Thanks to the Shippable team \nfor writing this blog post and for working with us.\n",{"slug":9035,"featured":6,"template":678},"getting-started-gitlab-and-shippable","content:en-us:blog:getting-started-gitlab-and-shippable.yml","Getting Started Gitlab And Shippable","en-us/blog/getting-started-gitlab-and-shippable.yml","en-us/blog/getting-started-gitlab-and-shippable",{"_path":9041,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9042,"content":9048,"config":9052,"_id":9054,"_type":16,"title":9055,"_source":17,"_file":9056,"_stem":9057,"_extension":20},"/en-us/blog/look-into-gitlab-infrastructure",{"title":9043,"description":9044,"ogTitle":9043,"ogDescription":9044,"noIndex":6,"ogImage":9045,"ogUrl":9046,"ogSiteName":692,"ogType":693,"canonicalUrls":9046,"schema":9047},"An inside look at the infrastructure of GitLab.com","In this post, you'll find out just how many servers we use. You'll gain some perspective on what those servers are up to.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684519/Blog/Hero%20Images/goal.jpg","https://about.gitlab.com/blog/look-into-gitlab-infrastructure","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An inside look at the infrastructure of GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomasz Maczukin\"}],\n        \"datePublished\": \"2016-04-29\",\n      }",{"title":9043,"description":9044,"authors":9049,"heroImage":9045,"date":9050,"body":9051,"category":14},[3538],"2016-04-29","\n\nA number of people have asked about the infrastructure of GitLab.com. Our passionate\nand curious Twitter followers inquired specifically about how many servers we use for\nGitLab.com. Given the number of questions we've gotten on this topic, we wanted to go\nahead and offer an inside look at our GitLab.com infrastructure. In this post,\nyou'll find out just how many servers we use. You'll gain some perspective on what\nthose servers are up to.\n\n\u003C!-- more -->\n\n## Baseline\n\nFor running GitLab.com as an application we have:\n\n- 5 HAProxy load balancers that are handling GitLab.com HTTP, HTTPS, and SSH\n- 2 HAProxy load balancers that are handling \"[alternative SSH][altssh]\" (altssh.GitLab.com), redirecting from port 443 to 22\n- 2 HAProxy load balancers that are handling \u003Chttps://pages.gitlab.io> HTTP and HTTPS\n- 20 workers running the GitLab EE application stack (Nginx, Workhorse, Unicorn + Rails, Redis + Sidekiq)\n- 2 NFS servers for the storage\n- 2 Redis servers\n- 2 PostgreSQL servers\n- 3 Elasticsearch servers\n\nThose are servers that we manage directly. With that, the server count is at 38.\n\n## Next\n\nWe also use 6 of Azure's \"Availability Sets\": 3 for load balancers, 1 for Redis HA, 1 for\nPostgreSQL HA, and 1 for Elasticsearch HA. Each of these Availability Sets has its own \"internal\"\nload balancer that is managing the HA traffic. If we count them as GitLab.com servers, then\nwe need to add 6 servers (now, the count is 44).\n\nWe also have 3 servers for GitLab Runners in [autoscale mode][scale]. Two of them are managing autoscaling\nof runners for GitLab CE/EE projects (so they are used only by GitLab and I will not count them).\nBut the third is used to manage [autoscaling for Shared Runners][shared] at GitLab.com. So +1 for\nthe \"Shared Runners manager.\"\n\nWe also have some servers that are specific to GitLab as a company (Runners for building\nOmnibus packages, etc.), but I wouldn't count those as a part of GitLab.com.\n\nIn the end, we have 45 servers that are used to make GitLab.com a usable application for our\nusers.\n\n## But wait, there's more\n\nAh! Don't forget about autoscaled Docker machines! Each user's builds are running on Docker hosts\ncreated \"on demand\" by the autoscaling mode of the Runner. Last week, I looked at a diagram of the\nmachine utilization and it showed that we had:\n\n- a minimum of 12 machines running at once,\n- a maximum of 150 machines running at once,\n- an average of 54 machines running at once.\n\nBecause Shared Runners can be used by all GitLab.com users, I would count them as well!\n\n## Final count\n\nSo, the answer is, GitLab.com is currently running on 45 servers. However, if we also\ncount the build hosts for Shared Runners, then GitLab.com is using 60 to 200 servers!\n\nWe appreciate the question and the curiosity. As always, keep the questions coming!\nYou can also visit [our Operations issue tracker](https://gitlab.com/gitlab-com/operations/issues) for a live look at what\nthe team is working on.\n\n[altssh]: /blog/gitlab-dot-com-now-supports-an-alternate-git-plus-ssh-port/\n[shared]: /blog/shared-runners/\n[scale]: /releases/2016/03/29/gitlab-runner-1-1-released/\n",{"slug":9053,"featured":6,"template":678},"look-into-gitlab-infrastructure","content:en-us:blog:look-into-gitlab-infrastructure.yml","Look Into Gitlab Infrastructure","en-us/blog/look-into-gitlab-infrastructure.yml","en-us/blog/look-into-gitlab-infrastructure",{"_path":9059,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9060,"content":9066,"config":9071,"_id":9073,"_type":16,"title":9074,"_source":17,"_file":9075,"_stem":9076,"_extension":20},"/en-us/blog/getting-started-with-gitlab-and-digitalocean",{"title":9061,"description":9062,"ogTitle":9061,"ogDescription":9062,"noIndex":6,"ogImage":9063,"ogUrl":9064,"ogSiteName":692,"ogType":693,"canonicalUrls":9064,"schema":9065},"Getting started with GitLab and DigitalOcean","This tutorial is adapted from the How To Use the GitLab One-Click Install Image to Manage Git Repositories tutorial on DigitalOcean.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672941/Blog/Hero%20Images/sharks-paper.jpg","https://about.gitlab.com/blog/getting-started-with-gitlab-and-digitalocean","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab and DigitalOcean\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Achilleas Pipinellis\"}],\n        \"datePublished\": \"2016-04-27\",\n      }",{"title":9061,"description":9062,"authors":9067,"heroImage":9063,"date":9068,"body":9069,"category":14,"tags":9070},[8273],"2016-04-27","\n_This tutorial is adapted from the [How To Use the GitLab One-Click Install\nImage to Manage Git Repositories][do-source] tutorial on DigitalOcean._\n\n### Introduction\n\nCollaborating on projects, keeping track of source changes, and maintaining a\nclean code repository are some great reasons to use a [version control](/topics/version-control/) system.\nVersion control is now considered an essential tool in software development.\n\nGit is the most popular distributed version control system. GitLab is a Git\nrepository management server that can be used to host repositories and set up\ncontrol structures for Git within a clean web interface. 
It is built on Ruby on\nRails.\n\n[DigitalOcean] has created a GitLab application image that can be used to\ninstantly deploy GitLab Community Edition on a DigitalOcean Ubuntu 14.04 x86-64\ndroplet using the [Omnibus installer]. You can have your own repository system\nup and running in minutes.\n\n\u003C!-- more -->\n\n## Requirements\n\nThe [GitLab documentation recommends a minimum of 2GB of RAM and 2 CPU cores][req]\nfor optimum performance. If your projects are small (fewer than 100 users total),\n1GB of RAM and 1 CPU core may be sufficient, but make sure you have at least 1GB\nof swap. You can follow the [How To Add Swap on Ubuntu 14.04][swap] tutorial.\n\nLow memory may result in `500` server errors (\"cannot allocate memory\"), so make\nsure you add enough RAM according to your needs.\n\n## Step One – Create a GitLab Droplet\n\nBefore you begin using GitLab, you need to spin up a DigitalOcean droplet using\nthe provided image.\n\nFrom the Control Panel, click on the \"Create Droplet\" button that is visible\nfrom any page:\n\n![Create Droplet](https://about.gitlab.com/images/blogimages/getting-started-with-gitlab-and-digitalocean/create-droplet-shadow.png)\n\nUnder the \"Choose an image\" section, select the \"One-click Apps\" tab and click\nthe \"GitLab\" image (the version might differ).\n\n![Droplet app](https://about.gitlab.com/images/blogimages/getting-started-with-gitlab-and-digitalocean/select_gitlab_app-shadow.png)\n\nThe next step is to choose the droplet size and the region you would like to use.\n\n![Hardware](https://about.gitlab.com/images/blogimages/getting-started-with-gitlab-and-digitalocean/hardware-shadow.png)\n\nAdd any SSH Keys, select any settings you'd like to use, and click \"Create\" at\nthe bottom.\n\n![Finalize creation](https://about.gitlab.com/images/blogimages/getting-started-with-gitlab-and-digitalocean/finalize-shadow.png)\n\nYour GitLab droplet will be created and available in a few minutes!\n\n## Step Two – Configure Domain Names and Emails\n\nWe still need to configure a few things before we can make full use of our environment.\n\nBegin by setting up the domain name you would like to use for your GitLab\ninstance. Learn [how to set up domain names on DigitalOcean][do-domain] if you\nwant to use DigitalOcean's nameservers.\n\n>**Note:**\nIf you don't have a domain name at your disposal, you can just use your\ndroplet's IP address, but GitLab will _not_ be able to send emails without\nusing an SMTP server.\n\nOnce your domain name is configured correctly, you need to adjust some values\non the actual VPS instance. Log into your droplet as root through SSH, and open\nthe GitLab configuration file with your text editor:\n\n```\nvim /etc/gitlab/gitlab.rb\n```\n\nUncomment and adjust the `external_url` parameter to match your domain name:\n\n```\nexternal_url \"http://your_domain.com/\"\n```\n\nIf you have [generated an SSL certificate][ssl] for your domain, you can\nconfigure GitLab to use it in this file as well. The following settings set the\nlocations of the SSL certificates and instruct the NGINX web server to redirect\nHTTP traffic to HTTPS. Note that for HTTPS to work, you **must** set the URL\nscheme in `external_url` to `https`:\n\n```\nexternal_url \"https://your_domain.com/\"\n\nnginx['ssl_certificate'] = \"/etc/gitlab/ssl/your_domain.com.crt\"\nnginx['ssl_certificate_key'] = \"/etc/gitlab/ssl/your_domain.com.key\"\nnginx['redirect_http_to_https'] = true\n```\n\nGitLab uses its own bundled NGINX web server. You can find more information on\ntopics like using your own external web server and changing the ports NGINX\nlistens on in the [official documentation][nginx-docs].\n\n
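Once the certificate paths are in place and you have reconfigured GitLab (the `gitlab-ctl reconfigure` step is described below), you can sanity-check the redirect from your terminal. A quick sketch, assuming `your_domain.com` already resolves to your droplet:\n\n```\n# Request only the response headers; with redirect_http_to_https enabled you\n# should see a 301 pointing at https://your_domain.com/\ncurl -I http://your_domain.com/\n```\n\n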
While we're in this file, we can adjust the email settings that GitLab will use\nin the \"From:\" field of automated emails, and the email display name, respectively:\n\n```\ngitlab_rails['gitlab_email_from'] = \"gitlab@your_domain.com\"\ngitlab_rails['gitlab_email_display_name'] = 'Example'\n```\n\nThe GitLab One-Click application is configured to use a local postfix server\nfor sending emails. If you wish to use it with GitLab, you should set it\nup correctly. Follow the \"[How To Install and Configure Postfix as a Send-Only\nSMTP Server on Ubuntu 14.04][do-postfix]\" tutorial for more information. For a\nproduction site, you will likely want to use an external service such as\nMandrill or SendGrid. If so, you can [configure the SMTP settings][gl-smtp] here\nas well.\n\nAfter saving and closing the file, we just need to reconfigure the service:\n\n```\ngitlab-ctl reconfigure\n```\n\nThis should be done whenever you make configuration changes in order for them to\ntake effect.\n\n## Step Three – Log into GitLab\n\nWhen you connect to your GitLab instance via SSH or the DigitalOcean web console,\nyou will see the message of the day (MOTD), which contains your randomly generated\nGitLab password. It will look like this:\n\n```\n------------------------------------------------------------------------------\nThank you for using DigitalOcean's GitLab Application.\nYour GitLab instance can be accessed at http://xxx.xxx.xxx.xxx/\nThe default credentials for GitLab are:\nUsername: root\nPassword: e0wXRM4fLmb6\n\nYou can find more information on using this image at: http://do.co/gitlabapp\n------------------------------------------------------------------------------\n```\n\nNext, open a web browser and navigate to your domain name (or the IP address of\nyour Droplet if you did not set up a domain name). You will be able to log in\nusing the credentials you found above.\n\n![Login](https://about.gitlab.com/images/blogimages/getting-started-with-gitlab-and-digitalocean/login-shadow.png)\n\nYou now have a full GitLab server configured and at your disposal to manage your\nrepositories.\n\n![Landing](https://about.gitlab.com/images/blogimages/getting-started-with-gitlab-and-digitalocean/landing-shadow.png)\n\n## Step Four – Modify Account Information\n\nIt would probably be more helpful if the account you're using more accurately\nreflected your information. This will allow you to receive email updates and\nwill display your information to other users. The root account is the first one\ncreated by default, and it contains some predefined values for \"Name\", \"Username\"\nand \"Email\". You can change all that from the \"Admin Area\".\n\nLet's navigate to the \"Admin Area\" by clicking the wrench icon in the top-right\ncorner.\n\n![Admin](https://about.gitlab.com/images/blogimages/getting-started-with-gitlab-and-digitalocean/admin_button-shadow.png)\n\nIn the left sidebar, click **Users**. This should only contain one user, the\nAdministrator account you are logged into.\n\n![Users area](https://about.gitlab.com/images/blogimages/getting-started-with-gitlab-and-digitalocean/admin_users-shadow.png)\n\nClick on the \"Edit\" button and change the account information at the top.\n\nThe \"Name\" field will be your name as displayed to other users. The \"Username\"\ncan be used to log in and it defines the owner of your projects. 
The \"Email\"\nis where alerts will be sent.\n\nIt is important to at least change the email field.\n\n![Account edit](https://about.gitlab.com/images/blogimages/getting-started-with-gitlab-and-digitalocean/account-shadow.png)\n\nClick \"Save changes\" at the bottom for the changes to take effect.\n\n## Updating to Newer Releases\n\nThe GitLab One-Click application is configured to use the GitLab Apt repository.\n[Updating to the most recent version][update-doc] is as simple as running:\n\n```\nsudo apt-get update\nsudo apt-get upgrade\n```\n\nBefore upgrading to a new release, GitLab automatically backups the database.\nIf you wish to make a full back up of your existing installation, run:\n\n```\nsudo gitlab-rake gitlab:backup:create\n```\n\nThe resulting backup will be located in: `/var/opt/gitlab/backups`. You can read\nmore in the [Omnibus backup documentation][backup].\n\n## Conclusion\n\nYou should now have a server configured to handle your team's Git projects. You\ncan easily manage user access, configure both public and private repositories,\nand get an overview of your projects' issues and commits.\n\nGitLab has a great help system accessible from within the user interface\n(visit `https://your_domain.com/help`). In a future article, we will discuss\nhow to manage repositories and users and effectively take advantage of the\ninterface.\n\nFor further information on configuring your GitLab Omnibus installation, check\nout the [official documentation][omnidocs].\n\n---\n\nIf you already have GitLab installed and want to use the [integrated CI][glci]\nfor your projects, check our other tutorial on\n[setting up GitLab Runner on DigitalOcean][runner-do].\n\n---\n\n[![Powered by DigitalOcean](https://about.gitlab.com/images/blogimages/powered-by-do-badge-gray.png)](https://www.digitalocean.com/features/one-click-apps/gitlab/)\n\n---\n\n_Photo credits: \u003Chttps://flic.kr/p/9RAQ2J> ([CC BY-NC 2.0][cc])_\n\n[digitalocean]: https://www.digitalocean.com\n[omnibus installer]: /2016/03/21/using-omnibus-gitlab-to-ship-gitlab/\n[req]: http://doc.gitlab.com/ce/install/requirements.html#hardware-requirements\n[do-domain]: https://www.digitalocean.com/community/articles/how-to-set-up-a-host-name-with-digitalocean\n[ssl]: https://www.digitalocean.com/community/tutorials/how-to-install-an-ssl-certificate-from-a-commercial-certificate-authority\n[nginx-docs]: http://doc.gitlab.com/omnibus/settings/nginx.html\n[do-postfix]: https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-postfix-as-a-send-only-smtp-server-on-ubuntu-14-04\n[gl-smtp]: http://doc.gitlab.com/omnibus/settings/smtp.html\n[backup]: http://doc.gitlab.com/omnibus/settings/backups.html\n[omnidocs]: http://doc.gitlab.com/omnibus\n[do-source]: https://www.digitalocean.com/community/tutorials/how-to-use-the-gitlab-one-click-install-image-to-manage-git-repositories\n[swap]: https://www.digitalocean.com/community/tutorials/how-to-add-swap-on-ubuntu-14-04\n[runner-do]: /blog/how-to-set-up-gitlab-runner-on-digitalocean/\n[glci]: /solutions/continuous-integration/ [cc]: https://creativecommons.org/licenses/by-nc/2.0/\n[update-doc]: http://doc.gitlab.com/omnibus/update/README.html#updating-using-the-official-repositories\n",[232],{"slug":9072,"featured":6,"template":678},"getting-started-with-gitlab-and-digitalocean","content:en-us:blog:getting-started-with-gitlab-and-digitalocean.yml","Getting Started With Gitlab And 
Digitalocean","en-us/blog/getting-started-with-gitlab-and-digitalocean.yml","en-us/blog/getting-started-with-gitlab-and-digitalocean",{"_path":9078,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9079,"content":9085,"config":9089,"_id":9091,"_type":16,"title":9092,"_source":17,"_file":9093,"_stem":9094,"_extension":20},"/en-us/blog/how-to-set-up-gitlab-runner-on-digitalocean",{"title":9080,"description":9081,"ogTitle":9080,"ogDescription":9081,"noIndex":6,"ogImage":9082,"ogUrl":9083,"ogSiteName":692,"ogType":693,"canonicalUrls":9083,"schema":9084},"How to set up GitLab Runner on DigitalOcean","In this tutorial we will explore how easy it is to install and set up your own Runner on DigitalOcean.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684667/Blog/Hero%20Images/runners.jpg","https://about.gitlab.com/blog/how-to-set-up-gitlab-runner-on-digitalocean","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to set up GitLab Runner on DigitalOcean\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Achilleas Pipinellis\"}],\n        \"datePublished\": \"2016-04-19\",\n      }",{"title":9080,"description":9081,"authors":9086,"heroImage":9082,"date":9087,"body":9088,"category":14},[8273],"2016-04-19","\n\n### Introduction\n\nGitLab has [built-in continuous integration][doc-ci] to allow you to run a\nnumber of tasks as you prepare to deploy your software. Typical tasks\nmight be to build a software package or to run tests as specified in a\nYAML file. These tasks need to run by something, and in GitLab this something\nis called a [Runner][doc-runners]; an application that processes builds.\n\nIn March we introduced [GitLab Runner 1.1][1_1] with the cool feature of\nautoscaling and a week later we announced that all [shared Runners on GitLab.com\nuse autoscaling][autoscale-post]. The shared Runners can be used for free by\nany user with a GitLab.com account.\n\nIn this tutorial we will explore how easy it is to install and set up your own\nRunner on [DigitalOcean]; a Runner that will be 'specific' to your projects as\nwe say in the GitLab lingo.\n\n## Prerequisites\n\nWe will use the Docker executor since it has the most supported features\naccording to the [GitLab Runner executor compatibility chart][chart]. For this,\nwe will need to install Docker on the server that will host the GitLab Runner.\n\nFortunately, DigitalOcean has a [one-click image with Docker][one-docker]\npre-installed on Ubuntu 14.04 and this is what we will use.\n\n>**Note:**\nYou are free to use any Linux distribution supported by DigitalOcean, even\nFreeBSD. 
GitLab Runner is supported on all Operating Systems.\n\n## Create a droplet\n\n[Log in to DigitalOcean][cloud] and, from the Control Panel, click on the \"Create\nDroplet\" button that is visible from any page.\n\n![Create Droplet](https://about.gitlab.com/images/blogimages/how-to-set-up-gitlab-runner-on-digitalocean/create-droplet-shadow.png)\n\nUnder the \"Choose an image\" section, select the \"One-click Apps\" tab and click\nthe \"Docker\" image (the version might differ).\n\n![Droplet app](https://about.gitlab.com/images/blogimages/how-to-set-up-gitlab-runner-on-digitalocean/select-docker-shadow.png)\n\nThe next step is to choose the droplet size and the region you would like to use.\nYou are advised to use the **1 GB** / **1 CPU** droplet for quicker builds.\n\n![Hardware](https://about.gitlab.com/images/blogimages/how-to-set-up-gitlab-runner-on-digitalocean/hardware-shadow.png)\n\nAdd any SSH Keys, select any settings you'd like to use, and click \"Create\" at\nthe bottom.\n\n![Finalize creation](https://about.gitlab.com/images/blogimages/how-to-set-up-gitlab-runner-on-digitalocean/finalize-shadow.png)\n\nYour Docker droplet will be created and available in a few minutes!\n\n## Install the GitLab Runner\n\nFirst, log in to the new droplet via SSH and verify that Docker is installed with:\n\n```bash\ndocker info\n```\n\nThis should show a bunch of information about the Docker version, the number of\nimages and containers, etc. With that set, we're ready to install GitLab\nRunner.\n\nGitLab provides a repository where you can easily install and update GitLab\nRunner. The supported distros are Debian, Ubuntu, and CentOS. Let's install the\nrepository with this one-liner:\n\n```bash\ncurl -L https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh | sudo bash\n```\n\nFeel free to read the script before you execute it if you want.\n\nNow let's install GitLab Runner:\n\n```bash\nsudo apt-get install gitlab-runner\n```\n\nAnd verify it's installed:\n\n```bash\ngitlab-runner --version\n```\n\nExcellent! We're now ready to start using it.\n\n> **Note:**\nFor other installation methods and Operating Systems, see the\n[installation documentation][doc-install].\n\n## Register the GitLab Runner\n\nRegistering a Runner is the process of tying it to a specific GitLab project.\nEach project on GitLab has a unique token that is used by the Runner in order\nto be able to talk to GitLab via its API.\n\nWhen we installed GitLab Runner in the previous step, we installed the GitLab\nRunner service. Each GitLab Runner service can spawn as many Runner processes\nas you want, so you can eventually register multiple Runners in a single droplet,\neach of which can be tied to a separate project.\n\nTo register a Runner we first need to know the project's token. Go to your\nnewly created project or pick one that already uses GitLab.com's shared Runners.\nNavigate to the project's **Settings > Runners** and notice that the shared\nRunners are enabled. On the left side you get detailed information on the steps\nneeded to register a new Runner.\n\n![Runners settings](https://about.gitlab.com/images/blogimages/how-to-set-up-gitlab-runner-on-digitalocean/runners-shadow.png)\n\nNow, let's get back to the droplet and start registering a Runner:\n\n```\nsudo gitlab-runner register\n```\n\nThe command above is interactive, so you will be asked for the information needed to\nregister a new Runner (a non-interactive equivalent is sketched after the list).\n\n- **the gitlab-ci coordinator URL:** Enter `https://gitlab.com/ci`.\n- **the gitlab-ci token for this runner:** The token shown in the previous image.\n- **the gitlab-ci description for this runner:** This is for your own reference,\n  in case you have multiple Runners and want something to remind you what the\n  Runner is about. If you don't enter a description, the hostname of the\n  droplet will be used.\n- **the gitlab-ci tags for this runner:** You can use tags in your\n  `.gitlab-ci.yml` to limit jobs to specific Runners. Useful if, for example,\n  you are running tests on different OSes. Enter `docker,digitalocean` for this\n  example.\n- **enter the executor:** Our executor will be `docker`.\n- **enter the default Docker image:** The default Docker image that will be\n  used if you don't specify one in `.gitlab-ci.yml`.\n\n
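If you'd rather script this step, `register` can also run non-interactively. The following is only a sketch; flag names can vary between Runner versions, so check `sudo gitlab-runner register --help` before relying on it (the token placeholder and default image below are just examples):\n\n```\nsudo gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/ci\" \\\n  --registration-token \"YOUR_PROJECT_TOKEN\" \\\n  --description \"digitalocean-docker-runner\" \\\n  --tag-list \"docker,digitalocean\" \\\n  --executor \"docker\" \\\n  --docker-image \"ruby:2.3\"\n```\n\n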
Once you have answered all the questions, you can verify that the Runner is registered with:\n\n```\nsudo gitlab-runner list\n```\n\nNow if you head back to your project's **Settings > Runners** you will see that\nthe Runner appeared in the list.\n\n![Runner](https://about.gitlab.com/images/blogimages/how-to-set-up-gitlab-runner-on-digitalocean/runner-shadow.png)\n\nYou can now start using this specific Runner for your project, and you may\ndisable the shared Runners.\n\n",{"slug":9090,"featured":6,"template":678},"how-to-set-up-gitlab-runner-on-digitalocean","content:en-us:blog:how-to-set-up-gitlab-runner-on-digitalocean.yml","How To Set Up Gitlab Runner On Digitalocean","en-us/blog/how-to-set-up-gitlab-runner-on-digitalocean.yml","en-us/blog/how-to-set-up-gitlab-runner-on-digitalocean",
In this blog post I will reflect on how we got\nhere.\n\n\u003C!-- more -->\n\n## Technical and personal motivations\n\nGitLab is a Ruby on Rails web application that uses the\n[Unicorn](http://unicorn.bogomips.org/) Ruby web server. I am a fan of\nUnicorn because it makes application resource leaks manageable and\nbecause it has served GitLab well for a long time by patching up\nproblems which we found or find too hard to solve 'properly'. (I am\nknown to growl at people who suggest swapping out Unicorn for another\nweb server in GitLab.)\n\nAt the same time, the design of Unicorn is incompatible with one of\nGitLab's main functions, namely Git repository access (`git clone`,\n`git push`, etc.) via HTTP(S). The reason it is incompatible is that\nUnicorn heavily relies on (relatively) short request timeouts. If you\nconfigure Unicorn to time out slowly rather than quickly then it starts\nto become a lot less pleasant to work with. A `git clone` on the other\nhand may take quite a long time if you are fetching a large Git\nrepository. In my previous role as a service engineer at GitLab I\nregularly had to explain this tension to customers. The only solution we\ncould offer was 'use Git over SSH instead'.\n\nAnother factor that led to gitlab-workhorse was my unfulfilled curiosity\nabout the [Go programming language](https://golang.org/). Go is\nsometimes credited with (or discredited for) having a strong marketing\npush behind it. The marketing worked on me: I have had a plush Go mascot\nstaring at me on my desk for almost three years now.\n\n![Gopher](https://about.gitlab.com/images/brief-history-of-gitlab-workhorse/gopher.jpg)\n\n## A weekend project gets merged into master\n\nSo one weekend in July last year I found myself with an itch to build\nsomething in Go and a lack of imagination which led me to ask myself:\ncould I rewrite\n[gitlab-grack](https://gitlab.com/gitlab-org/gitlab-grack), the GitLab\ncomponent that responds to Git HTTP clients, in Go? For the record I am\nnot proud of having used my own (non-work) time to drive development of\nthis project for the first few months, I think it sets a bad example for\nmyself and others. But that is how it went.\n\nThe result was a very short and mostly correct Go program called\n'gitlab-git-http-server' that suffered from none of the timeout issues\nthat `git clone` via Unicorn had. It integrated with GitLab in a sneaky\nway by letting NGINX divert Git HTTP requests away from Unicorn to\ngitlab-git-http-server. The required changes in the GitLab Rails\ncodebase were so minor that I could easily hide them behind a [feature\nflag](https://en.wikipedia.org/wiki/Feature_toggle). To top it off I\nannounced gitlab-git-http-server to the team on a day the CTO was on\nvacation: the ultimate sneak attack. Just kidding, but I thought it was\na funny coincidence about [Dmitriy](https://gitlab.com/dzaporozhets)'s\nvacation.\n\nThe team somehow let me 'try gitlab-git-http-server out' (read: merge it\ninto master and deploy it on our staging server) and so it got started.\nLess than a month later GitLab 7.14 shipped with gitlab-git-http-server\nbehind a feature flag. This allowed us to test it on GitLab.com for a\nmonth. Between our staging environment and GitLab.com we were able to\ncatch the worst bugs. In GitLab 8.0 (released the month after 7.14)\nwe made gitlab-git-http-server an official (and required!) 
component of\nGitLab.\n\nThe acceptance of gitlab-git-http-server by the team was probably helped\nby a shared understanding that GitLab's Git-over-HTTP solution was just\nnot quite cutting it, and by the fact that we already used Go for\n[gitlab-ci-multi-runner](https://gitlab.com/gitlab-org/gitlab-runner).\nBut there was no up-front decision to solve the problem of Git-over-HTTP\nat this particular time, or using these means.\n\n## Feature creep\n\nUntil now gitlab-git-http-server was a one-trick pony: it only handled\nGit HTTP clients. But as usually happens when you have a new hammer,\nother things started looking like nails. In GitLab 8.1 we changed\ngitlab-git-http-server to also handle the 'download zip' button from the\nGitLab web interface. In retrospect this seems obvious but at the time\nit felt like a big leap: in our minds, gitlab-git-http-server was a\ndaemon that understood (stateless) [HTTP Basic\nAuthentication](https://en.wikipedia.org/wiki/Basic_access_authentication)\nbut not the session cookies used by GitLab to identify individual users.\nBut a session cookie is just an HTTP header so nothing stopped\ngitlab-git-http-server from 'impersonating' a logged-in user and\ngenerating a zip file for them on the fly. I have a bit of a hard time\nexplaining now why but we thought this was very neat at the time.\n\n## Time for a new name\n\nIn GitLab 8.2 we wanted to ship two new features for which we expected\nthe same sort of Unicorn timeout problems that had plagued Git-over-HTTP\nin the past: [Git LFS](https://git-lfs.github.com/) support (developed\nby [Marin](https://gitlab.com/marin)) and [CI build\nartifacts](http://doc.gitlab.com/ce/ci/build_artifacts/README.html)\n(developed by [Kamil](https://gitlab.com/ayufan)). Both of these\nfeatures depended on users uploading and downloading arbitrarily large\nfiles.\n\nThis development brought many improvements to gitlab-git-http-server.\nFirst of all, the more people had to say or write\n'gitlab-git-http-server', the more obvious it became that the name was\ntoo awkward. And with all these new features it was also no longer\nappropriate, because the program did more than dealing with\nGit-over-HTTP. We have Marin to thank for coming up with\n'gitlab-workhorse' which I especially like because it pokes fun at\nUnicorn.\n\nIt was also a great development for gitlab-workhorse to be getting\nattention from Kamil because he is our resident Go expert. This was very\nwelcome: I felt confident enough that gitlab-workhorse functioned\ncorrectly, but I am not an experienced Go programmer. Having Kamil in\nthe game helped us make gitlab-workhorse a better Go program.\n\nFor a short while, Marin and I were on the one hand trying to implement\nfile uploads/downloads in gitlab-workhorse, while Kamil on the other\nhand was implementing the same thing for CI artifacts using NGINX\nplugins. Luckily we spotted the duplication of efforts before the code\nwent out the door so we were able to implement this in gitlab-workhorse\ntogether for GitLab 8.2.\n\nWe ended up with an especially nice solution for file downloads in\ngitlab-workhorse, inspired by the mechanism Kamil intended to use in\nNGINX: `X-Sendfile` headers. Most of the time when you want to use\ngitlab-workhorse to make something faster or more robust in GitLab you\nhave to write both Ruby code and Go code. 
But because [Ruby on Rails\nunderstands `X-Sendfile`\nalready](http://api.rubyonrails.org/classes/ActionController/DataStreaming.html#method-i-send_file),\nGitLab developers can reap the benefits of gitlab-workhorse for file\ndownloads without writing any Go code!\n\n## Betting the farm\n\nBy this time the success with which we could build new GitLab features\nby doing part of the work in gitlab-workhorse started to cause problems\nof its own. Each time we added a feature to gitlab-workhorse that meant\ndiverting more HTTP requests to gitlab-workhorse in the NGINX\nconfiguration. This complexity was hidden from people who installed\nGitLab using our [Omnibus packages](https://packages.gitlab.com/gitlab)\nbut I could tell from the gitlab-workhorse issue tracker that this was a\nrecurring source of problems for installations from source.\n\nPrior to gitlab-workhorse, NGINX served static files or forwarded\nrequests to Unicorn:\n\n    +----------+      +-------------+\n    |          |      |             |\n    |  NGINX   +----> |   Unicorn   |\n    |          |      |             |\n    +------+---+      +-------------+\n           |\n           |\n           |          +------------+\n           |          |            |\n           +--------> |   static   |\n                      |   files    |\n                      |            |\n                      +------------+\n\nNow with gitlab-workhorse in the picture, NGINX had to know which\nrequests to send to Unicorn, which to gitlab-workhorse, and which to\nstatic files.\n\n                    +--------------------+\n                    |                    |\n           +------> |  gitlab-workhorse  |\n           |        |                    |\n           |        +---------+----------+\n           |                  |\n           |                  v\n           |\n    +------+---+      +-------------+\n    |          |      |             |\n    |  NGINX   +----> |   Unicorn   |\n    |          |      |             |\n    +------+---+      +-------------+\n           |\n           |\n           |          +------------+\n           |          |            |\n           +--------> |   static   |\n                      |   files    |\n                      |            |\n                      +------------+\n\nKamil half-jokingly suggested at one point that we could route all HTTP\ntraffic to GitLab through gitlab-workhorse. Over time I started to\nbelieve this was a good idea: it would radically simplify the NGINX\nconfiguration for GitLab, and consequently make it easier to deploy\nGitLab behind other web servers (like Apache) which some people prefer\nstrongly to using NGINX. It seems the idea grew on Kamil too because we\nsoon saw a huge merge request from him to gitlab-workhorse which turned\nit into a 'smart proxy' that serves static files, injects error pages,\nimplements Git-over-HTTP plus other features, *and* proxies traffic to\nGitLab.\n\n    +-------------+         +---------------------+        +------------+\n    |             |         |                     |        |            |\n    |   NGINX     +-------> |  gitlab-workhorse   +------> |  Unicorn   |\n    |             |         |                     |        |            |\n    +-------------+         +---------------------+        +------------+\n\nThis change went out in GitLab 8.3. 
In a little over four months\ngitlab-workhorse went from being a little helper daemon on the side to a\ntraffic cop that routes all HTTP requests going into GitLab.\n\nThis work immediately paid off in GitLab 8.4, when\n[Grzegorz](https://gitlab.com/grzesiek) added the CI artifact browsing\nfeature, and in GitLab 8.5, where we started serving 'raw' Git blobs via\ngitlab-workhorse. Neither of these changes forced GitLab administrators\nto update their NGINX configuration.\n\n## What to do next\n\nThe most important next step for gitlab-workhorse is to make it less\ndependent on me by getting more of my team members to contribute to the\ncode. I am trying to do this by purposely leaving some [nice\nchanges](https://gitlab.com/gitlab-org/gitlab-ce/issues/13999) for\nothers to implement.\n\nBecause of the gradual (and sneaky :) ) way gitlab-workhorse was added\nto GitLab we still have some technical debt in GitLab in the [Git HTTP\nauthentication / authorization\ncode](https://gitlab.com/gitlab-org/gitlab-ce/issues/14501). It would be\nnice to clean this up.\n\nFinally, it is sub-optimal that we still buffer Git pushes in NGINX\nbefore forwarding them to gitlab-workhorse. We could avoid this\nunnecessary delay and give people who use Apache instead of NGINX a\nbetter experience if we [implement selective request buffering in\ngitlab-workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse/issues/1#note_2681403).\n\nIf you want to know what is coming next in GitLab, check out our\n[Direction](/direction/) page, or follow developments on\nour latest milestones for 8.7 [CE](https://gitlab.com/gitlab-org/gitlab-ce/milestones/23)\nand [EE](https://gitlab.com/gitlab-org/gitlab-ee/milestones/9).\n",{"slug":9108,"featured":6,"template":678},"a-brief-history-of-gitlab-workhorse","content:en-us:blog:a-brief-history-of-gitlab-workhorse.yml","A Brief History Of Gitlab Workhorse","en-us/blog/a-brief-history-of-gitlab-workhorse.yml","en-us/blog/a-brief-history-of-gitlab-workhorse",{"_path":9114,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9115,"content":9121,"config":9126,"_id":9128,"_type":16,"title":9129,"_source":17,"_file":9130,"_stem":9131,"_extension":20},"/en-us/blog/tutorial-securing-your-gitlab-pages-with-tls-and-letsencrypt",{"title":9116,"description":9117,"ogTitle":9116,"ogDescription":9117,"noIndex":6,"ogImage":9118,"ogUrl":9119,"ogSiteName":692,"ogType":693,"canonicalUrls":9119,"schema":9120},"Tutorial: Securing your GitLab Pages with TLS and Let's Encrypt","In this post we will talk about HTTPS and how to add it to your GitLab Pages site with Let's Encrypt","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672214/Blog/Hero%20Images/altssh.jpg","https://about.gitlab.com/blog/tutorial-securing-your-gitlab-pages-with-tls-and-letsencrypt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Securing your GitLab Pages with TLS and Let's Encrypt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Guest author André Miranda\"}],\n        \"datePublished\": \"2016-04-11\",\n      }",{"title":9116,"description":9117,"authors":9122,"heroImage":9118,"date":9124,"body":9125,"category":14},[9123],"Guest author André Miranda","2016-04-11","\n\nIn this post we will talk about HTTPS and how to add it to your GitLab Pages site\nwith [Let's Encrypt][letsencrypt].\n\n\u003C!-- more -->\n\n## Why TLS/SSL?\n\nWhen discussing HTTPS, it's common to hear people saying that a static\nwebsite doesn't need 
HTTPS, since it doesn't receive any POST requests, or isn't\nhandling credit card transactions or any other sensitive data.\nBut that's not the whole story.\n\nTLS ([formerly SSL][TLSwiki]) is a security protocol that can be added to HTTP\nto increase the security of your website by:\n\n1. properly authenticating yourself: the client can trust that you are really\n**you**. The TLS handshake that is made at the beginning of the connection\nassures the client that no one is trying to impersonate you;\n2. data integrity: this ensures that no one has tampered with the data in a\nrequest/response cycle;\n3. encryption: this is the main selling point of TLS, but the\nother two are just as important. This protects the privacy of the communication\nbetween client and server.\n\nThe TLS layer can be added to other protocols too, such as FTP (making it\n[FTPS](https://en.wikipedia.org/wiki/FTPS)) or WebSockets\n(turning `ws://` into [`wss://`](https://devcenter.heroku.com/articles/websocket-security#wss)).\n\n## HTTPS Everywhere\n\nNowadays, there is a strong push for using TLS on every website.\nThe ultimate goal is to make the web safer by adding the three guarantees\ncited above to all of it.\n\nThe first big player was the [HTTPS Everywhere](https://www.eff.org/https-everywhere)\nbrowser extension. Google has also been using HTTPS compliance to better\nrank websites since [2014](https://webmasters.googleblog.com/2014/08/https-as-ranking-signal.html).\n\n## How to get TLS certificates\n\nTo add TLS to HTTP you need a certificate, and until 2015 you had to\neither pay for one or figure out how to get it from one of the\navailable [Certificate Authorities][certificateauthority].\n\nEnter [Let's Encrypt][letsencrypt], a free, automated, and open Certificate Authority.\nSince [December 2015][publicbeta] anyone can get a free certificate from this\nnew Certificate Authority from the comfort of their terminal.\n\n\n## Implementation\n\nSo, let's suppose we're going to create a static blog with [Jekyll 3][Jekyll].\nIf you are not creating a blog or are not using Jekyll, just follow along; it\nshould be straightforward enough to translate the steps for different purposes.\nYou can also find many example projects using different static site generators\n(like Middleman or Hugo) in [GitLab's example projects][examplepages].\n\nA simple example blog can be created with:\n\n```shell\n$ jekyll new cool-blog\nNew jekyll site installed in ~/cool-blog.\n$ cd cool-blog/\n```\n\nNow you have to create a GitLab project. Here we are going to create a \"user\npage\", which means that it is a project created within a user account (not a\ngroup account), and that the name of the project looks like `YOURUSERNAME.gitlab.io`.\nRefer to the [\"Getting started\" section of the GitLab Pages manual][pagesdocs]\nfor more information on that.\n\nFrom now on, remember to replace `YOURDOMAIN.org` with your custom domain and\n`YOURUSERNAME` with, well, your username. ;)\n\n[Create a project] named `YOURUSERNAME.gitlab.io` so that GitLab will\nidentify the project correctly. 
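Note that `jekyll new` does not initialize a Git repository for you, so if you haven't done that yet, turn the blog folder into one and commit your files first:\n\n```shell\n$ git init\n$ git add .\n$ git commit -m \"initial commit\"\n```\n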
After that, upload your code to GitLab:\n\n```shell\n$ git remote add origin git@gitlab.com:YOURUSERNAME/YOURUSERNAME.gitlab.io.git\n$ git push -u origin master\n```\n\nOK, so far we have a project uploaded to GitLab, but we haven't configured GitLab Pages yet.\nTo configure it, just create a `.gitlab-ci.yml` file in the root directory of your repository\nwith the following contents:\n\n```yaml\npages:\n  stage: deploy\n  image: ruby:2.3\n  script:\n    - gem install jekyll\n    - jekyll build -d public/\n  artifacts:\n    paths:\n      - public\n  only:\n    - master\n```\n\nThis file instructs GitLab Runner to `deploy` by installing Jekyll and\nbuilding your website under the `public/` folder\n(`jekyll build -d public/`).\n\nWhile you wait for the build process to complete, you can track the progress on the\nBuilds page of your project. Once it starts, it probably won't take longer\nthan a few minutes. Once the build is finished, your website will be available at\n`https://YOURUSERNAME.gitlab.io`. Note that GitLab already provides TLS\ncertificates to all subdomains of `gitlab.io` (but with some limitations, so\nplease [refer to the documentation for more][limitation]). So if you don't want to add a\ncustom domain, you're done.\n\n## How to configure the TLS certificate of your custom domain\n\nOnce you buy a domain name and point that domain to your GitLab Pages website,\nyou need to configure two things:\n\n1. add the domain to the GitLab Pages configuration ([see documentation][customdomain]);\n2. add your custom certificate to your website.\n\nOnce you add your domain, your website will be available under both\n`http://YOURDOMAIN.org` and `https://YOURUSERNAME.gitlab.io`.\n\nBut if you try to access your custom domain with `HTTPS`\n(`https://YOURDOMAIN.org` in this case), your browser will show that\nhorrible page saying that things are going wrong and someone is trying to\nsteal your information. *Why is that?*\n\nSince GitLab offers TLS certificates to all `gitlab.io` pages\nand your custom domain is just a `CNAME` over that same domain, GitLab serves\nthe `gitlab.io` certificate, and your browser receives mixed messages: on one\nside, the browser is trying to access `YOURDOMAIN.org`, but on the other side\nit is getting a TLS certificate for `*.gitlab.io`,\nsignaling that something is wrong.\n\nIn order to fix it, you need to obtain a certificate for `YOURDOMAIN.org` and\nadd it to GitLab Pages. For that we are going to use\n[Let's Encrypt](https://letsencrypt.org/).\n\nLet's Encrypt is a new certificate authority that offers both *free* and\n*automated* certificates. That's perfect for us: we don't have to pay for\nHTTPS, and everything can be done from the comfort of your terminal.\n\nWe begin by downloading the `letsencrypt-auto` utility.\nOpen a new terminal window and type:\n\n```shell\n$ git clone https://github.com/letsencrypt/letsencrypt\n$ cd letsencrypt\n```\n\n`letsencrypt-auto` offers a lot of functionality. For example, if you have\na web server running Apache, you could run `letsencrypt-auto --apache` on that\nserver and have everything done for you. `letsencrypt` primarily targets Unix-like\nwebservers, so the `letsencrypt-auto` tool won't work for Windows users. 
Check [this\ntutorial][letsencryptwindows] to see how to get Let's Encrypt certificates while running\nWindows.\n\nSince we are running on GitLab's servers instead, we have to do a bit of manual\nwork:\n\n```shell\n$ ./letsencrypt-auto certonly -a manual -d YOURDOMAIN.org\n#\n# If you want to support another domain, www.YOURDOMAIN.org, for example, you\n# can add it to the domain list after -d like:\n# ./letsencrypt-auto certonly -a manual -d YOURDOMAIN.org www.YOURDOMAIN.org\n#\n```\n\nAfter you accept that your IP will be publicly logged, a message like the\nfollowing will appear:\n\n```shell\nMake sure your web server displays the following content at\nhttp://YOURDOMAIN.org/.well-known/acme-challenge/5TBu788fW0tQ5EOwZMdu1Gv3e9C33gxjV58hVtWTbDM\nbefore continuing:\n\n5TBu788fW0tQ5EOwZMdu1Gv3e9C33gxjV58hVtWTbDM.ewlbSYgvIxVOqiP1lD2zeDKWBGEZMRfO_4kJyLRP_4U\n\n#\n# output omitted\n#\n\nPress ENTER to continue\n```\n\nNow it is waiting for the server to be correctly configured so it can go on.\nLeave this terminal window open for now.\n\nSo, the goal is to make our already-published static website return\nsaid token when said URL is requested. That's easy: create a custom\npage! Just create a file in your blog folder that looks like this:\n\n```markdown\n---\nlayout: null\npermalink: /.well-known/acme-challenge/5TBu788fW0tQ5EOwZMdu1Gv3e9C33gxjV58hVtWTbDM.html\n---\n\n5TBu788fW0tQ5EOwZMdu1Gv3e9C33gxjV58hVtWTbDM.ewlbSYgvIxVOqiP1lD2zeDKWBGEZMRfO_4kJyLRP_4U\n```\n\nThis tells Jekyll to create a static page, which you can see at\n`cool-blog/_site/.well-known/acme-challenge/5TBu788fW0tQ5EOwZMdu1Gv3e9C33gxjV58hVtWTbDM.html`,\nwith no extra HTML, just the token in plain text. As we are using the `permalink` attribute in the\nfront matter, you can name this file any way you want and put it anywhere, too.\nNote that the behaviour of the `permalink` attribute has\n[changed][jekyllversion] from Jekyll 2 to Jekyll 3, so make sure you have Jekyll 3.x installed.\nIf you're not using version 3 of Jekyll or if you're using a different tool,\njust create the same file in the exact path, like\n`cool-blog/.well-known/acme-challenge/5TBu788fW0tQ5EOwZMdu1Gv3e9C33gxjV58hVtWTbDM.html`\nor an equivalent path in your static site generator of choice.\nHere we'll call it `letsencrypt-setup.html` and place it in the root folder\nof the blog. In order to check that everything is working as expected, start a local server with `jekyll serve` in a separate terminal window and try to access the URL:\n\n```shell\n$ curl http://localhost:4000/.well-known/acme-challenge/5TBu788fW0tQ5EOwZMdu1Gv3e9C33gxjV58hVtWTbDM\n# response:\n5TBu788fW0tQ5EOwZMdu1Gv3e9C33gxjV58hVtWTbDM.ewlbSYgvIxVOqiP1lD2zeDKWBGEZMRfO_4kJyLRP_4U\n```\n\nNote that I just replaced the `http://YOURDOMAIN.org` (from the\n`letsencrypt-auto` instructions) with `http://localhost:4000`.\nEverything is working fine, so we just need to upload the new file to GitLab:\n\n```shell\n$ git add letsencrypt-setup.html\n$ git commit -m \"add letsencrypt-setup.html file\"\n$ git push\n```\n\nOnce the build finishes, check again that everything is working well:\n\n```shell\n# Note that we're using the actual domain, not localhost anymore\n$ curl http://YOURDOMAIN.org/.well-known/acme-challenge/5TBu788fW0tQ5EOwZMdu1Gv3e9C33gxjV58hVtWTbDM\n```\n\nIf you get a `404 page not found`, check if you missed any step, or get in touch\nin the comments below.\n\nNow that everything is working as expected, go back to the terminal window\nthat's waiting for you and hit `ENTER`. 
This instructs Let's Encrypt's\nservers to go to the URL we just created. If they get the response they were waiting for,\nwe've proven that we actually own the domain, and they'll send us the\nTLS certificates. After a while it responds:\n\n```\nIMPORTANT NOTES:\n - Congratulations! Your certificate and chain have been saved at\n   /etc/letsencrypt/live/YOURDOMAIN.org/fullchain.pem. Your cert will\n   expire on 2016-07-04. To obtain a new version of the certificate in\n   the future, simply run Let's Encrypt again.\n - If you like Let's Encrypt, please consider supporting our work by:\n\n   Donating to ISRG / Let's Encrypt:   https://letsencrypt.org/donate\n   Donating to EFF:                    https://eff.org/donate-le\n```\n\nSuccess! We have correctly acquired a free TLS certificate for our domain!\n\nNote, however, that like any other TLS certificate, it has an expiration date;\nin the case of Let's Encrypt, the certificate remains valid for 90 days.\nWhen you finish setting up, add a reminder to your calendar to renew the\ncertificate in time, otherwise it will become invalid and the browser will\nreject it.\n\nNow we just need to upload the certificate and the key to GitLab.\nGo to **Settings** -> **Pages** inside your project, remove the old `CNAME` and\nadd a new one with the same domain, but now you'll also upload the TLS\ncertificate. Paste the contents of `/etc/letsencrypt/live/YOURDOMAIN.org/fullchain.pem`\n(you'll need `sudo` to read the file) to the \"Certificate (PEM)\"\nfield and `/etc/letsencrypt/live/YOURDOMAIN.org/privkey.pem` (also needs `sudo`) to the\n\"Key (PEM)\" field.\n\n![Uploading the certificate to GitLab Pages](https://about.gitlab.com/images/blogimages/gitlab-pages-cert-upload-screenshot.png)\n\nAnd you're done! You now have a fully working HTTPS website:\n\n```shell\n$ curl -vI https://YOURDOMAIN.org/\n#\n# starting connection\n#\n* TLS 1.2 connection using TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\n* Server certificate: YOURDOMAIN.org\n* Server certificate: Lets Encrypt Authority X3\n* Server certificate: DST Root CA X3\n```\n\n## How to redirect all traffic to the HTTPS version\n\nEverything is working fine, but now we have an extra concern: we have two\nworking versions of our website, both HTTP **and** HTTPS. We need a way to\nredirect all of our traffic to the HTTPS version, and tell search engines to\ndo the same.\n\n### How to tell search engines which is the correct version\n\nInstructing the search engines is really easy: just tell them that the HTTPS\nversion is the \"canonical\" version, and they'll send all users to it.\nAnd how do you do that? 
By adding a `link` tag to the header of the HTML:\n\n```html\n\u003Clink rel=\"canonical\" href=\"https://YOURDOMAIN.org/specific/page\" />\n```\n\nAdding this to the header of every page tells search engines that the correct\nversion is the HTTPS one, and they'll comply.\n\n### Internal links\n\nRemember to use HTTPS for your CSS or JavaScript file URLs, because when the\nbrowser accesses a secure website that relies on an insecure resource, it may\nblock that resource.\n\nIt is [considered good practice][relativeprotocol] to use protocol-relative URLs:\n\n```html\n\u003Clink rel=\"stylesheet\" href=\"//YOURDOMAIN.org/styles.css\" />\n\u003Cscript src=\"//YOURDOMAIN.org/script.js\">\u003C/script>\n```\n\n### When to use JavaScript-based redirect\n\nThere is, however, a case where the user specifically types in the URL\n**without** using HTTPS, and they'll access the HTTP version of your website.\n\nThe correct way of handling that would be to respond with a 301 \"Moved\npermanently\" HTTP code, and the browser would remember it for the next request.\nHowever, that's not a possibility we have here, since we're running on GitLab's servers.\n\nA small hack you can do is to redirect your users with a bit of JavaScript code:\n\n```javascript\nvar host = \"YOURDOMAIN.org\";\nif ((host == window.location.host) && (window.location.protocol != 'https:')) {\n  window.location = window.location.toString().replace(/^http:/, \"https:\");\n}\n```\n\nThis redirects the user to the HTTPS version, but there are a few problems with it:\n\n1. a user could have JavaScript disabled, and would never be redirected;\n2. an attacker could simply remove that code and behave as a [Man in the Middle][middleattack];\n3. the browser won't remember the redirect instruction, so every time the user types\nthat same URL, the website will have to redirect them again.\n\n## Wrap up\n\n![a working certificate screenshot](https://about.gitlab.com/images/blogimages/working-certificate-screenshot.png)\n\nThat's how easy it is to have a free HTTPS-enabled website.\nWith these tools, I see no reason not to do it.\n\nIf you want to improve GitLab's support for Let's Encrypt, you can\ndiscuss and contribute in issues [#474][issue474], [#467][issue467] and\n[#472][issue472] from GitLab EE. 
They are open to merge requests!\n\nThere's an [excellent talk][talk] by [Pierre Far][pierretwitter] and\n[Ilya Grigorik][ilyatwitter] on HTTPS where you can learn more\nabout it.\n\nIf you want to check the status of your HTTPS enabled website,\n[SSL Labs offers a free online service][ssltest] that\n\"performs a deep analysis of the configuration of any SSL web server on the\npublic Internet\".\n\nThis article is based on [Paul Wakeford's post][wakeford].\n\nI hope it helps you :)\n\n[Create a project]: https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project\n[Jekyll]: https://jekyllrb.com/\n[examplepages]: https://gitlab.com/groups/pages\n[pagesdocs]: http://doc.gitlab.com/ee/pages/README.html#getting-started-with-gitlab-pages\n[TLSwiki]: https://en.wikipedia.org/wiki/Transport_Layer_Security#TLS_1.0\n[letsencrypt]: https://letsencrypt.org/\n[wakeford]: https://www.paulwakeford.info/2015/11/24/letsencrypt/\n[publicbeta]: https://letsencrypt.org/2015/12/03/entering-public-beta.html\n[ssltest]: https://www.ssllabs.com/ssltest/\n[middleattack]: https://en.wikipedia.org/wiki/Man-in-the-middle_attack\n[talk]: https://www.youtube.com/watch?v=cBhZ6S0PFCY\n[relativeprotocol]: http://www.paulirish.com/2010/the-protocol-relative-url/\n[jekyllversion]: https://jekyllrb.com/docs/upgrading/2-to-3/#permalinks-no-longer-automatically-add-a-trailing-slash\n[letsencryptwindows]: https://cultiv.nl/blog/lets-encrypt-on-windows/\n[customdomain]: http://doc.gitlab.com/ee/pages/README.html#add-a-custom-domain-to-your-pages-website\n[certificateauthority]: https://en.wikipedia.org/wiki/Certificate_authority\n[limitation]: http://doc.gitlab.com/ee/pages/README.html#limitations\n[issue474]: https://gitlab.com/gitlab-org/gitlab-ee/issues/474\n[issue472]: https://gitlab.com/gitlab-org/gitlab-ee/issues/472\n[issue467]: https://gitlab.com/gitlab-org/gitlab-ee/issues/467\n[pierretwitter]: https://twitter.com/pierrefar\n[ilyatwitter]: https://twitter.com/igrigorik\n",{"slug":9127,"featured":6,"template":678},"tutorial-securing-your-gitlab-pages-with-tls-and-letsencrypt","content:en-us:blog:tutorial-securing-your-gitlab-pages-with-tls-and-letsencrypt.yml","Tutorial Securing Your Gitlab Pages With Tls And Letsencrypt","en-us/blog/tutorial-securing-your-gitlab-pages-with-tls-and-letsencrypt.yml","en-us/blog/tutorial-securing-your-gitlab-pages-with-tls-and-letsencrypt",{"_path":9133,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9134,"content":9140,"config":9144,"_id":9146,"_type":16,"title":9147,"_source":17,"_file":9148,"_stem":9149,"_extension":20},"/en-us/blog/gitlab-pages-setup",{"title":9135,"description":9136,"ogTitle":9135,"ogDescription":9136,"noIndex":6,"ogImage":9137,"ogUrl":9138,"ogSiteName":692,"ogType":693,"canonicalUrls":9138,"schema":9139},"Hosting on GitLab.com with GitLab Pages","Learn how to host your website on GitLab.com with GitLab Pages","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671069/Blog/Hero%20Images/gitlab-pages-setup-cover.jpg","https://about.gitlab.com/blog/gitlab-pages-setup","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Hosting on GitLab.com with GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcia Ramos\"}],\n        \"datePublished\": \"2016-04-07\",\n      }",{"title":9135,"description":9136,"authors":9141,"heroImage":9137,"date":9142,"body":9143,"category":14},[8399],"2016-04-07","\nIn this article we provide you with 
detailed information about using [GitLab Pages][pages] to\nhost your website for free on [GitLab.com][sign-up].\n\nWe've prepared a step-by-step tutorial on creating a new project for GitLab Pages so you won't get lost in the process.\n\nGitLab Pages supports [static websites][wiki-static-websites] and can build them with **any** [Static Site Generator (SSG)][SSGs],\nsuch as [Jekyll], [Hugo], [Hexo], [Middleman] and [Pelican].\n\nWe are assuming that you are familiar with [Git][git] and with the web development process, from creation to publishing.\n\n\u003C!-- more -->\n\n----------\n\n### What's in this tutorial?\n{: .no_toc}\n\n- TOC\n{:toc}\n\n----\n\n## Getting Started\n\nThere are two ways of getting started with GitLab Pages: either you fork an existing project, or you create a new one for yourself.\n\nOn the [GitLab Pages Quick Start Guide][pages], which is, by the way, a site built with [Middleman],\nyou will find the steps for forking an existing project from a list of examples prepared for you.\nThere are some popular SSGs, like Jekyll, Hugo, Hexo, Brunch, etc.\n\nHowever, if you want to understand the process of creating a new project from scratch, this post is for you.\n\nOn the official documentation you can learn about [GitLab Pages][pages-work], but here we will focus on the **steps**\nfor creating your own project.\n\n**Note:** [GitLab Pages was introduced in GitLab EE 8.3][pages-introduced].\nIt is available for [GitLab.com][sign-up] and [GitLab Enterprise Edition][gitlab-ee] users.\nUpdate: [GitLab 8.17](/releases/2017/02/22/gitlab-8-17-released/#gitlab-pages-in-community-edition)\nbrought GitLab Pages to GitLab Community Edition! Enjoy!\n{: .note}\n\n## Website Types\n\nIn general, you are allowed to create and host two sorts of websites with GitLab Pages:\n\n1. User/Group Websites - a single site per user or group\n1. Project Websites - as many sites as you want\n\nYou can find out more about them on the [docs][pages-work].\n\n### A note regarding GitLab Groups and Namespaces\n{: #group-websites}\n\nCreating a [group][doc-groups] on GitLab is very useful when you have several projects on the same subject.\nA group has its own **namespace**, which is unique for each group (and for each user).\n\nThe important point is that when you create your account on GitLab, it's better to choose a username for\nyourself as a person, not as a company. You can create your \"company username\" later, as a group\nnamespace. For example, let's say your name is \"John Doe\" and your company is called \"Foo Main\".\nFirst, register yourself as `johndoe` and later create a group called Foo Main within the namespace of\n`foomain`. 
This will allow you to separate your personal projects from your company ones.\n\nIf you follow this tip, you will be able to access your personal site under `https://username.gitlab.io`\nand your company site under `https://groupname.gitlab.io`.\n\n## About GitLab CI for GitLab Pages\n{: #gitlab-ci}\n\nThe key to having everything up and running as expected is the [**GitLab CI** configuration file][doc-ciconfig], called `.gitlab-ci.yml`.\n\nThis file [configures][ee-yaml-ci] how your website will be built by a _[Runner][doc-config-runners]_.\nIt is written in [YAML], which has its own syntax, so we recommend you\nfollow this [quick start guide] before setting it up.\nIt needs to be placed in your repository's root directory.\n\nThe most important fact is that with [GitLab CI](/solutions/continuous-integration/), **you** take control of your builds.\nThey won't run in a black box where you don't know what is going on!\nYou can actually **see** any build running live by navigating to your project's **Pipelines > Builds > Build ID**.\nYou can also add any command to your script. This is extremely useful, as it allows you to do\npretty much anything you would do on your local machine!\n\nFor example, you can add any [Jekyll Plugin] to your Jekyll site,\nyou can require any `gem` you need in your `Gemfile`, run `npm`, run `bundle` and much more.\nBottom line, it's as handy as having your own command line in your GitLab UI.\n\nAdditionally, you can have a distinct `.gitlab-ci.yml` for each repository - even for each branch.\nThis means you can test your script in parallel branches before pushing to your `main` branch.\nIf the build succeeds, you merge. If it doesn't, you can make adjustments and try building\nagain without messing up your `main` branch.\n\nBefore you push any `.gitlab-ci.yml` to your project, you can\nvalidate its syntax with the tool called [CI Lint][ci-lint].\nYou need to be logged into your account to have access to this tool.\nIt's found by navigating to your project's **Pipelines**: there is a button at the top-right of your screen.\n\n![CI-Lint](https://about.gitlab.com/images/blogimages/gitlab-pages/gitlab-ci-lint.png)\n\nYou can read through the [full documentation for `.gitlab-ci.yml`][ee-yaml-ci] for more information.\n\n## Creating new GitLab Pages projects\n{: #creating-new-pages-projects}\n\nHere is an overview of the steps we'll take, assuming you already have your GitLab.com account:\n\n1. Create a new project\n1. Add the configuration file (`.gitlab-ci.yml`)\n1. Upload your website content\n1. Add your custom domain _(optional)_\n1. Done!\n\n## Step-by-step\n\nNow we will go through this process step-by-step. Update: watch the video tutorial on\n[How to Publish a Website with GitLab Pages on GitLab.com from a forked project](https://youtu.be/TWqh9MtT4Bg)!\n\n### Step 1: Create a new project\n{: #creating-new-project}\n\nThis is as straightforward as you can imagine:\n\n- On your **dashboard** you will see a big green button called **+ New Project**. 
Click on it.\n- Set up the first things:\n   - **Project path** - your project's name, accessed via `https://gitlab.com/namespace/projectname`\n   - **Privacy** - choose if you want your project to be visible and accessible just for you (`private`),\n   just for GitLab.com users (`internal`) or free for anyone to view, clone, fork and download it (`public`)\n\n**Note**: you can host your website on [GitLab.com][gitlab-com] even if it is stored in a private repository.\nIf you do so, your project stays protected: only the published static site will be visible\nto the public, e.g., via \"Inspect Element\" or \"View Source\" in a web browser.\n{: .note}\n\n### Step 2: Add the configuration file: `.gitlab-ci.yml`\n{: #add-gitlab-ci}\n\nNow we can have some fun! Let's tell GitLab CI how to build the site.\nYou will see a few examples below (options A, B, and C) to understand how they work.\n\n### Option A: GitLab CI for plain HTML websites\n{: .no_toc}\n\nIn order to build your [plain HTML site][pages-ci-html] with GitLab Pages,\nyour `.gitlab-ci.yml` file doesn't need much:\n\n```yaml\npages:\n  stage: deploy\n  script:\n  - mkdir .public\n  - cp -r * .public\n  - mv .public public\n  artifacts:\n    paths:\n    - public\n  only:\n  - main\n```\n\nWhat this code does is create a _[job][doc-jobs]_ called _[pages][doc-contents-ciconfig]_,\ntelling the _[Runner][doc-shared-runners]_ to _[deploy][doc-stages]_ the website _[artifacts][doc-artifacts]_\nto a _[public path][doc-contents-ciconfig]_,\nwhenever a commit is pushed _[only][doc-only]_ to the `main` branch.\n\nAll pages are created after the build completes successfully\nand the artifacts for the pages job are uploaded to GitLab.\n\n### Option B: GitLab CI for Jekyll websites\n{: .no_toc}\n\nJekyll is by far the most popular [Static Site Generator (SSG)][SSGs] available, which is why we'll use it as the first example\nfor configuring GitLab CI. In the next section you'll find more [examples](#examples) for SSGs already tested with GitLab Pages.\n\nJekyll is written in [Ruby] and generates static blog-aware websites.\nBlog-aware means a website generator will create blog-style content, such as lists of\ncontent in reverse chronological order, archive lists, and\nother common blog-style features.\n\nWe can write dynamically with [Liquid], [Markdown] and [YAML], and\nJekyll builds the static site (HTML, CSS, JS) for us.\nYou will find the same functionality for every SSG,\nyet each of them uses its own environment, template system, markup language, etc.\n\nIf you want GitLab Pages to [build your Jekyll website][pages-ci-jekyll],\nyou can start with the simple script below:\n\n```yaml\nimage: ruby:2.1\n\npages:\n  script:\n  - gem install jekyll\n  - jekyll build -d public/\n  artifacts:\n    paths:\n    - public\n  only:\n  - main\n```\n\nThis code requires the _[script][doc-script]_ to run in\na [Ruby] 2.1.x _[environment][doc-images]_,\ninstalls the Jekyll gem, and builds the site\ninto the _[public path][doc-contents-ciconfig]_.\nThe result affects _[only][doc-only]_ the main branch.\nFor building a regular Jekyll site, you can just\ncopy this code and paste it into your `.gitlab-ci.yml`.\n\nIf you are familiar with Jekyll, you will probably want to use [Bundler] to build your Jekyll site.\nWe've prepared an [example][jekyll-proj] for that. 
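In essence, switching to Bundler only changes the commands the job's `script` runs; here is a minimal sketch of those commands, assuming your repository has a `Gemfile` that lists Jekyll:\n\n```shell\n$ bundle install --path vendor    # install the gems from your Gemfile into ./vendor\n$ bundle exec jekyll build -d public/\n# remember to exclude `vendor` in your `_config.yml` so Jekyll doesn't copy it into the site\n```\n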
Also, if you want to use a specific Jekyll version, you can\nfind an [example][jekyll-253-example] in the [Jekyll Themes][jekyll-examples]\ngroup I set up for the purposes of this post.\nAnd of course, since you are the one who controls how GitLab CI builds your site,\nyou are free to use any [Jekyll Plugins][Jekyll Plugin]. _Yep!_\n\n### Option C: GitLab CI for Hexo websites\n{: .no_toc}\n\nLet's see another example. [Hexo] is a powerful blog-aware framework built with [NodeJS][node],\na server-side JavaScript environment based on the high-performance [Google V8] engine.\n\nTo build our Hexo site, we can start with this `.gitlab-ci.yml`:\n\n```yaml\nimage: node:4.2.2\n\npages:\n  cache:\n    paths:\n    - node_modules/\n\n  script:\n  - npm install hexo-cli -g\n  - npm install\n  - hexo deploy\n  artifacts:\n    paths:\n    - public\n  only:\n    - main\n```\n\nNote that the [Docker image][node-422] we require is `node:4.2.2`.\nWe cache the `npm` modules, install `hexo-cli`, and deploy\nour `hexo` site to the default `public` directory, which is uploaded to GitLab as `artifacts`.\nThe `pages` job `only` affects the `main` branch.\n\nOn the [Pages][ci-examples] group you will find a default [Hexo site][pages-hexo]\ndeployed with GitLab Pages, and on [this group][themes-templates], another [example][hexo-proj]\nwith a slightly different configuration.\n\n### Step 3: Upload your website content\n{: #upload-content}\n\nPush the content to your remote project and keep an eye on the build!\n\n**Don't forget:** when you are using GitLab Pages with a Static Site Generator,\ndo not upload the directory your SSG generated locally;\notherwise you'll have duplicated content and you might face build errors.\nFor example, do not commit the `_site` directory ([Jekyll]) or the `build` directory\n([Middleman]) or the `public` directory ([Hexo]). You can do this automatically by adding\nthem to a `.gitignore` file, placed in your project's root directory.\n\nE.g., if you are building a Jekyll site, your `.gitignore` will have this line:\n\n```\n_site\n```\n\nA `.gitignore` is very useful for keeping any file or folder within your project from being uploaded to your remote repository.\nIf you want to know more about it, check the [`.gitignore` official docs][git-docs-gitignore].\n\n### Step 4: Add your custom domain\n{: #custom-domains}\n\n**Note:** Custom CNAMEs with TLS support were introduced in [GitLab EE 8.5][EE-85].\n{: .note}\n\nIf you want, you are free to [add your own domain name(s)][pages-custom-domain] to your website hosted by GitLab.com.\n\nIt's not required, though: you can always use the standard\n[GitLab Pages default domain names](https://docs.gitlab.com/ee/user/project/pages/getting_started_part_one.html#gitlab-pages-default-domain-names).\n\n_Features_\n\n- Besides including your own domain, you can add your custom **subdomain** to your GitLab Pages project (e.g., `subdomain.example.com`)\n- You can enter more than one domain alias **per project** (e.g., `example.com`,\n`example.net`, `my.example.org` and `another-example.com` pointing to your project under `mynamespace.gitlab.io` or\n`mynamespace.gitlab.io/myproject`). 
A domain alias is like having multiple front doors to one location.\n- If you want to enable an HTTPS secure connection to your domains, you can attach your own SSL/TLS digital\ncertificate to **each** custom domain or subdomain you've added to your projects.\n\n_Steps to set up a custom domain_\n\n- From your project's dashboard, go to **Settings** (\u003Ci class=\"fas fa-cog\" aria-hidden=\"true\">\u003C/i>) **> Pages > New Domain**\n- Add your domain to the first field: `mydomain.com`\n- If you have an SSL/TLS digital certificate and its key, add them to their respective fields.\nIf you don't, just leave the fields blank.\n- Click on **Create New Domain**.\n- Finally, access your domain control panel and create a new [DNS `A` record][dns-A] pointing\nto the [IP of the GitLab Pages server][pages-settings]:\n\n```\nmydomain.com A 35.185.44.232\n```\n\n**Note:** This GitLab Pages IP address for GitLab.com changed from `52.167.214.135` to `35.185.44.232` in August 2018.\n{:.note}\n\nAlternatively, a similar procedure can be applied for **custom subdomains**:\n\n- Add the subdomain to the first field: `subdomain.mydomain.com`\n\n- Then create a new [DNS `CNAME` record][dns-cname] pointing to `myusername.gitlab.io`:\n\n```\nsubdomain.mydomain.com CNAME myusername.gitlab.io\n```\n\nRepeat these steps for any additional domain aliases.\n\nNote that how you set up your DNS records will depend upon which company you\nused to register your domain name. Every company has its own methods for DNS Zone Management.\nAt this link you can find an [overview for some providers][dns-zone-examples],\nwhich might help you follow along. Please contact your provider directly if you need some extra help.\n\nOrdinarily, DNS propagation needs some time to take effect, so don't worry if you can't access your\nwebsite under your custom domain instantaneously. Wait a few minutes and check it again.\n\n## Examples\n\nCheck out the [Pages][ci-examples] official group for a list of example projects,\nwhere you can explore some good options for Static Site Generators for Ruby, NodeJS and Python environments.\nYou can also find more specific examples on the following groups, which I prepared for the purposes of this post:\n\n- [Jekyll Themes][jekyll-examples] (Ruby/Jekyll)\n- [Middleman Themes][middle-examples] (Ruby/Middleman)\n- [Themes and Templates][themes-templates] (Miscellaneous)\n- [HTML Themes][html-examples] (plain HTML)\n\n**Note:** these themes, templates and SSGs were casually chosen and listed on this\npost to provide you with some distinct GitLab CI configurations.\n{: .note}\n\n## FAQ\n\n### Is all of this really free to use?\n{: .no_toc}\n\nYes, it is! On [GitLab.com][sign-up] you can create your free account\nand enjoy all its [features][gitlab-com], including unlimited private repositories,\nprojects, websites, and contributors. Also, you'll have 10GB disk space per project, [1GB for Pages artifacts][pages-settings],\nand unlimited total disk space. Awesome, isn't it? 
Why don't you take a peek at the [public projects][explore]?\n\n### Where is the `public` folder?\n{: .no_toc}\n\nWhen a build succeeds, you'll find your static site at your project's **Pipelines > Builds > Build ID > Browse**.\nYou can download the artifacts from the same screen.\n\n![Build Artifacts - Browse or Download](https://about.gitlab.com/images/blogimages/gitlab-pages/gitlab-browse-download-artifacts.png)\n\n### Can I really use any Static Site Generator?\n{: .no_toc}\n\nYes, you can use any [Static Site Generator][SSGs] available.\n\n### Can I use free SSL/TLS digital certificates?\n{: .no_toc}\n\nYes, absolutely! Need a suggestion? Try [Let's Encrypt][lets-encrypt] or [Cloudflare].\n\n### Can I contribute to the themes?\n{: .no_toc}\n\nSure! You are very welcome to contribute to the groups mentioned above.\nTo do that, please set your website up and make sure it's working as expected.\nThen, add an issue to the [group](#examples) you're interested in. Don't forget to include a link to your project.\nAfter a brief evaluation, we'll be glad to fork your project and present your theme to our community!\n\n### Can I use `.php` pages and connect databases with my sites?\n{: .no_toc}\n\nNo. GitLab Pages hosts static websites only (HTML, CSS and JS).\n\n## Getting Help\n\nIf you need some help regarding GitLab Pages on GitLab.com,\nfeel free to use one of [our channels][get-help]. You can also open an issue on the [Pages][pages-issues] group.\n\n\u003Ca name=\"conclusions\">\u003C/a>\n\n## Conclusion\n{: #conclusions}\n\nHopefully now you understand how **[GitLab Pages][pages]** works and how to create your new site.\n\nFollow [@GitLab][twitter] on Twitter and stay tuned for updates!\n\nWe're looking forward to seeing your sites!\n\n**Note:** this post has been updated (June 17th, 2016) to match the new GitLab UI.\n{: .note}\n\n## About guest author Marcia Ramos\n{: .no_toc}\n\n[Marcia](https://gitlab.com/marcia) is a backend web developer specializing in WordPress and Jekyll sites at [Virtua Creative],\nthough she does some frontend work too. 
Her daily work has been based on version-control systems for almost 15 years.\nShe is driven by her thirst for knowledge and her eagerness to continuously expand her horizons.\nWhen she is not coding, she is writing articles, studying, teaching or contributing to open source projects here and there.\nUpdate: she joined the [GitLab Team] in May 2016.\n\n[doc-artifacts]: http://doc.gitlab.com/ee/ci/yaml/README.html#artifacts\n[doc-ciconfig]: http://doc.gitlab.com/ee/ci/quick_start/README.html#creating-a-.gitlab-ci.yml-file\n[doc-config-runners]: http://doc.gitlab.com/ee/ci/quick_start/README.html#configuring-a-runner\n[doc-contents-ciconfig]: http://doc.gitlab.com/ee/pages/README.html#explore-the-contents-of-.gitlab-ci.yml\n[doc-groups]: http://doc.gitlab.com/ee/workflow/groups.html\n[doc-images]: http://doc.gitlab.com/ee/ci/yaml/README.html#image-and-services\n[doc-jobs]: http://doc.gitlab.com/ce/ci/yaml/README.html#jobs\n[doc-only]: http://doc.gitlab.com/ee/ci/yaml/README.html#only-and-except\n[doc-runners]: http://doc.gitlab.com/ee/ci/runners/README.html#sts=Runners\n[doc-script]: http://doc.gitlab.com/ee/ci/yaml/README.html#script\n[doc-shared-runners]: http://doc.gitlab.com/ee/ci/quick_start/README.html#shared-runners\n[doc-stages]: http://doc.gitlab.com/ce/ci/yaml/README.html#stages\n[ee-yaml-ci]: http://doc.gitlab.com/ee/ci/yaml/README.html\n[pages]: https://pages.gitlab.io\n[pages-ee]: http://doc.gitlab.com/ee/pages/README.html\n[pages-introduced]: /2016/04/04/gitlab-pages-get-started/\n[pages-issues]: https://gitlab.com/pages/pages.gitlab.io/issues\n[pages-work]: http://doc.gitlab.com/ee/pages/README.html#getting-started-with-gitlab-pages\n[pages-user]: http://doc.gitlab.com/ee/pages/README.html#user-or-group-pages\n[pages-project]: http://doc.gitlab.com/ee/pages/README.html#project-pages\n[pages-ci-html]: http://doc.gitlab.com/ee/pages/README.html#how-.gitlab-ci.yml-looks-like-when-the-static-content-is-in-your-repository\n[pages-ci-jekyll]: http://doc.gitlab.com/ee/pages/README.html#how-.gitlab-ci.yml-looks-like-when-using-a-static-generator\n[pages-custom-domain]: http://doc.gitlab.com/ee/pages/README.html#add-a-custom-domain-to-your-pages-website\n[pages-settings]: https://docs.gitlab.com/ee/user/gitlab_com/#gitlab-pages\n[quick start guide]: http://doc.gitlab.com/ee/ci/quick_start/README.html\n\n[about-gitlab-com]: /\n[ci-lint]: https://gitlab.com/ci/lint \"Try me!\"\n[cname-issue]: https://gitlab.com/gitlab-org/gitlab-ee/issues/134\n[ee-85]: https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/173\n[explore]: https://gitlab.com/explore\n[get-help]: /get-help/\n[gitlab83]: /2015/12/22/gitlab-8-3-released\n[gitlab-com]: /pricing/\n[gitlab-ee]: /features/#enterprise\n[GitLab Team]: /company/team/#XMDRamos\n[sign-up]: https://gitlab.com/users/sign_in \"Sign Up, it's free!\"\n[twitter]: https://twitter.com/gitlab\n\n[Brunch]: http://brunch.io/\n[Bundler]: http://bundler.io/\n[Cloudflare]: /2017/02/07/setting-up-gitlab-pages-with-cloudflare-certificates/\n[Coffee Script]: http://coffeescript.org/\n[dns-A]: https://support.dnsimple.com/articles/a-record/\n[dns-cname]: https://en.wikipedia.org/wiki/CNAME_record\n[dns-zone-examples]: http://docs.businesscatalyst.com/user-manual/site-settings/site-domains/updating-dns-records-with-a-domain-registrar-external-dns\n[git]: https://git-scm.com/about\n[git-docs-gitignore]: https://git-scm.com/docs/gitignore\n[go]: https://golang.org/\n[Google V8]: https://developers.google.com/v8/\n[Harp]: http://harpjs.com/\n[Hexo]: 
https://hexo.io/\n[Hyde]: http://hyde.github.io/\n[Hugo]: https://gohugo.io/\n[Jekyll]: https://jekyllrb.com\n[Jekyll Documentation]: http://jekyllrb.com/docs/home/\n[Jekyll Plugin]: https://jekyllrb.com/docs/plugins/\n[Lektor]: https://www.getlektor.com/\n[lets-encrypt]: /blog/tutorial-securing-your-gitlab-pages-with-tls-and-letsencrypt/\n[Liquid]: https://github.com/Shopify/liquid/wiki\n[Markdown]: http://daringfireball.net/projects/markdown/\n[Metalsmith]: http://www.metalsmith.io/\n[Middleman]: https://middlemanapp.com/\n[Nanoc]: https://nanoc.ws/\n[node]: https://nodejs.org/en/\n[node-422]: https://hub.docker.com/_/node/\n[Pelican]: http://blog.getpelican.com/\n[Python]: https://www.python.org/\n[Ruby]: https://www.ruby-lang.org/\n[Sass]: http://sass-lang.com/\n[SSGs]: https://www.staticgen.com/\n[StartSSL]: https://startssl.com/\n[wiki-static-websites]: https://en.wikipedia.org/wiki/Static_web_page\n[YAML]: http://yaml.org/\n[Virtua Creative]: http://virtuacreative.com.br/en/\n\n[ci-examples]: https://gitlab.com/groups/pages\n[html-examples]: https://gitlab.com/groups/html-themes\n[jekyll-examples]: https://gitlab.com/groups/jekyll-themes\n[middle-examples]: https://gitlab.com/groups/middleman-themes\n[themes-templates]: https://gitlab.com/themes-templates\n\n[jekyll-proj]: https://gitlab.com/jekyll-themes/default-bundler\n[jekyll-253-example]: https://gitlab.com/jekyll-themes/carte-noire\n[hexo-proj]: https://gitlab.com/themes-templates/hexo\n[pages-hexo]: https://gitlab.com/pages/hexo\n",{"slug":9145,"featured":6,"template":678},"gitlab-pages-setup","content:en-us:blog:gitlab-pages-setup.yml","Gitlab Pages Setup","en-us/blog/gitlab-pages-setup.yml","en-us/blog/gitlab-pages-setup",{"_path":9151,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9152,"content":9158,"config":9163,"_id":9165,"_type":16,"title":9166,"_source":17,"_file":9167,"_stem":9168,"_extension":20},"/en-us/blog/shared-runners",{"title":9153,"description":9154,"ogTitle":9153,"ogDescription":9154,"noIndex":6,"ogImage":9155,"ogUrl":9156,"ogSiteName":692,"ogType":693,"canonicalUrls":9156,"schema":9157},"GitLab.com Shared Runners use Autoscaling","With the latest release of GitLab Runner 1.1, we've introduced autoscaling to help us meet the growing demand","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684484/Blog/Hero%20Images/agile.jpg","https://about.gitlab.com/blog/shared-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab.com Shared Runners use Autoscaling\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kamil Trzciński\"}],\n        \"datePublished\": \"2016-04-05\",\n      }",{"title":9153,"description":9154,"authors":9159,"heroImage":9155,"date":9161,"body":9162,"category":14},[9160],"Kamil Trzciński","2016-04-05","\n\n\n**2022 Update** - GitLab.com SaaS Runners have evolved since the time of this blog post. See the up-to-date documentation on the [SaaS Runners fleet for Linux, Windows, and Mac](https://docs.gitlab.com/ee/ci/runners/index.html). \n\n\nNot only is [Continuous Integration][docs-ci] built into GitLab CE and EE,\nbut we also offer [Shared Runners][docs-runners] to run your builds in CI *for\nfree* on GitLab.com. Up until recently, you may have experienced a short wait\ntime as your build got queued for a shared runner. 
With the [latest release of\nGitLab Runner 1.1][runner-release], we've introduced autoscaling to help us meet\nthe growing demand, and this is now available on GitLab.com. Less waiting, more\nbuilding!\n\n\u003C!--more-->\n\n## Scaling the service\n\nProjects hosted in GitLab can have CI tasks defined in their [`.gitlab-ci.yml`\nfiles](http://doc.gitlab.com/ce/ci/yaml/README.html). These tasks are performed\nby [*runners*][docs-runners], which are essentially virtual machines that run\nyour builds in Docker containers. These machines can run any of your builds that\nare compatible with Docker.\n\nOn other platforms, similar functionality is only available with an add-on\ncharge. In GitLab it's free to connect your own runners, and we also began\noffering free [Shared Runners][docs-runners] on GitLab.com. That means Shared\nRunners are freely available for projects on GitLab.com, whether they are\nprivate or public. However, up until recently users may have noticed that their\nbuilds were queued while they waited for a shared runner to become\navailable for work.\n\nToday we are extending our offering, enabling the [recently announced][runner-release]\nautoscaling feature. This will reduce build times as well as the time\nrequired to allocate a new machine.\n\nAs of today, the Shared Runners for GitLab.com use the new GitLab Runner 1.1.\nGitLab Runner is configured in autoscaling mode with a distributed cache and\na Docker registry proxy for Docker images.\n\n## Using Shared Runners\n\nYou will be able to continue using the Shared Runners for testing and deploying\nyour private projects.\n\nThe Shared Runners will continue to be used to build your static pages that\nare served by [GitLab Pages][docs-pages].\n\n## The machines\n\nAll your builds run on [Digital Ocean](https://www.digitalocean.com/) 4GB\ninstances, with CoreOS and the latest Docker Engine installed.\n\nYour builds will always run on fresh machines. This effectively\neliminates a class of security issues: breaking out of the container gains an\nattacker nothing on a machine that is discarded after a single build.\n\n## The tags\n\nAll Shared Runners are tagged with `shared`, `docker` and `linux`.\n\nYou can use these tags in your `.gitlab-ci.yml` file to limit which runners are\nused for specific jobs:\n\n```\ntest:\n  ...\n  tags:\n  - shared\n\ndeploy:\n  ...\n  tags:\n  - my_private_runner\n```\n\nThe above script will configure GitLab to always run your tests on shared\nrunners, and run deployments only on your specific runner, registered with\na `my_private_runner` tag.\n\n## What has changed\n\nPreviously, runners were configured to always start the `mysql`, `postgres`,\n`redis`, and `mongodb` services.\nHowever, we are aware that most of our users don't need to use all (or even any)\nof these services, and have removed them from the default configuration.\n\nIf your builds _do_ require one or more of these services, they may start\nto fail unexpectedly. 
Modify your `.gitlab-ci.yml` file to add the services\nrequired by your application:\n\n```\nservices:\n- mysql\n- postgres\n- redis\n- mongodb\n\ntests:\n  script: run-my-tests\n  ...\n```\n\n## Final configuration\n\nYou may be interested in what the GitLab Runner [config.toml][config-toml] looks like.\nIt's really simple!\n\n```\n[[runners]]\n  name = \"docker-auto-scale\"\n  limit = X\n  url = \"https://gitlab.com/ci\"\n  token = \"SHARED_RUNNER_TOKEN\"\n  executor = \"docker+machine\"\n  [runners.docker]\n    image = \"ruby:2.1\"\n    privileged = false\n    volumes = [\"/cache\", \"/usr/local/bundle/gems\"]\n  [runners.machine]\n    IdleCount = 20\n    IdleTime = 1800\n    MaxBuilds = 1\n    MachineDriver = \"digitalocean\"\n    MachineName = \"machine-%s-digital-ocean-4gb\"\n    MachineOptions = [\n      \"digitalocean-image=coreos-beta\",\n      \"digitalocean-ssh-user=core\",\n      \"digitalocean-access-token=DIGITAL_OCEAN_ACCESS_TOKEN\",\n      \"digitalocean-region=nyc2\",\n      \"digitalocean-size=4gb\",\n      \"digitalocean-private-networking\",\n      \"engine-registry-mirror=http://IP_TO_OUR_REGISTRY_MIRROR\"\n    ]\n  [runners.cache]\n    Type = \"s3\"\n    ServerAddress = \"IP_TO_OUR_CACHE_SERVER\"\n    AccessKey = \"ACCESS_KEY\"\n    SecretKey = \"ACCESS_SECRET_KEY\"\n    BucketName = \"runner\"\n```\n\nThe above configuration says that each VM will be used only once (`MaxBuilds = 1`), making your builds secure.\nWe will always have 20 machines waiting to pick up a new build (`IdleCount = 20`).\nWe use Digital Ocean 4GB machines in NYC2, with CoreOS Beta and Docker 1.9.1 installed.\nThe runner is configured to use [Docker Hub Registry Mirror][docker-mirror] and [Distributed runners caching][docker-caching].\n\nHappy building!\n\n## Live webcast: GitLab CI\n\nSign up for our webcast on April 14th, which includes an overview and tutorial\nabout using GitLab CI. Join to meet the GitLab CI team and get your questions\nanswered live!\n\n- Date: Thursday, April 14, 2016\n- Time: 5pm (17:00) UTC; 12pm EST; 9am PST\n- [Register here](http://page.gitlab.com/apr-2016-gitlab-intro-ci-webcast.html)\n\nCan't make it? 
Register anyway, and we'll send you a link to watch it later!\n\n[docs-ci]: http://doc.gitlab.com/ce/ci/README.html\n[docs-pages]: http://doc.gitlab.com/ee/pages/README.html\n[docs-runners]: http://doc.gitlab.com/ce/ci/runners/README.html\n[runner-release]: /releases/2016/03/29/gitlab-runner-1-1-released/\n[docker-mirror]: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/configuration/autoscale.md#distributed-docker-registry-mirroring\n[docker-caching]: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/configuration/autoscale.md#distributed-runners-caching\n[config-toml]: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/configuration/advanced-configuration.md\n",{"slug":9164,"featured":6,"template":678},"shared-runners","content:en-us:blog:shared-runners.yml","Shared Runners","en-us/blog/shared-runners.yml","en-us/blog/shared-runners",{"_path":9170,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9171,"content":9177,"config":9181,"_id":9183,"_type":16,"title":9184,"_source":17,"_file":9185,"_stem":9186,"_extension":20},"/en-us/blog/gitlab-pages-get-started",{"title":9172,"description":9173,"ogTitle":9172,"ogDescription":9173,"noIndex":6,"ogImage":9174,"ogUrl":9175,"ogSiteName":692,"ogType":693,"canonicalUrls":9175,"schema":9176},"Get Started with GitLab Pages","We've since added some great resources to help you get started with Gitlab Pages, including this handy quickstart guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684195/Blog/Hero%20Images/ios-development.jpg","https://about.gitlab.com/blog/gitlab-pages-get-started","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get Started with GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Achilleas Pipinellis\"}],\n        \"datePublished\": \"2016-04-04\",\n      }",{"title":9172,"description":9173,"authors":9178,"heroImage":9174,"date":9179,"body":9180,"category":14},[8273],"2016-04-04","\n\nWith [GitLab Pages][docs-pages] you can host your static website for free.\nWe added GitLab Pages in GitLab Enterprise Edition (EE) 8.3, and\nthen added support for custom domains and TLS certificates in GitLab EE 8.5. We\nmade this service freely available to users on [GitLab.com](https://gitlab.com),\nwhich is our hosted GitLab EE service, offering unlimited and free public or\nprivate projects.\n\nWe've since added some great resources to help you get started, including this\nhandy [quickstart guide][quickstart].\n\n\u003C!-- more -->\n\n## What you need to know about GitLab Pages\n\n1. There are two kinds of Pages:\n    - User or group Pages\n    - Project Pages\n2. You can use [any static site generator][staticgen]\n3. You can connect custom domains and TLS certificates to secure your domains\n4. 
The service is completely free as part of GitLab.com.\n\n## New resources to learn how to use GitLab Pages\n\nWe added improved documentation to help you get your site set up.\n\n- GitLab Pages Quick Start Guide: https://pages.gitlab.io\n- Documentation: [GitLab Pages User guide][docs-pages]\n- Documentation: [GitLab Pages Admin guide][docs-adminpages]\n\nWe also [added a group][group] with a number of example GitLab Pages projects.\n\n![GitLab Pages example projects](https://about.gitlab.com/images/blogimages/gitlab-pages-examples.png)\n\nYou can easily get started with a [Plain HTML](https://gitlab.com/pages/plain-html)\nsite, but you can do much more.\nThe range of examples shows that GitLab can support *any static site generator*.\nYou name the generator; you can build it with GitLab!\n\n- [Jekyll](https://gitlab.com/pages/jekyll)\n- [Pelican](https://gitlab.com/pages/pelican)\n- [Hugo](https://gitlab.com/pages/hugo)\n- [Middleman](https://gitlab.com/pages/middleman)\n- [Hexo](https://gitlab.com/pages/hexo)\n- [Brunch](https://gitlab.com/pages/brunch)\n- [Metalsmith](https://gitlab.com/pages/metalsmith)\n- [Harp](https://gitlab.com/pages/harp)\n\nAll of this is made possible with [GitLab CI][ci]. If you'd like to know more,\nsign up for our webcast below!\n\n## Need some help to get started?\n\nWe'd love your feedback on our [GitLab Pages Quick Start][quickstart] guide.\nIf you have any questions, you can submit them in the comments,\nor on the [issue tracker] for the GitLab Pages Quick Start Guide project.\n\n## Live webcast: GitLab CI\n\nSign up for our webcast on April 14th, which includes an overview and tutorial\nabout using GitLab CI. Meet people from the GitLab CI team and get your questions\nanswered live!\n\n- Date: Thursday, April 14, 2016\n- Time: 5pm (17:00) UTC; 12pm EST; 9am PST\n- [Register here](http://page.gitlab.com/apr-2016-gitlab-intro-ci-webcast.html)\n\nCan't make it? 
Register anyway, and we'll send you a link to watch it later!\n\n[issue tracker]: https://gitlab.com/pages/pages.gitlab.io/issues\n[docs-pages]: http://doc.gitlab.com/ee/pages/README.html\n[docs-adminpages]: http://doc.gitlab.com/ee/pages/administration.html\n[quickstart]: https://pages.gitlab.io\n[group]: https://gitlab.com/groups/pages\n[ci]: /solutions/continuous-integration/\n[staticgen]: https://www.staticgen.com/\n",{"slug":9182,"featured":6,"template":678},"gitlab-pages-get-started","content:en-us:blog:gitlab-pages-get-started.yml","Gitlab Pages Get Started","en-us/blog/gitlab-pages-get-started.yml","en-us/blog/gitlab-pages-get-started",{"_path":9188,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9189,"content":9195,"config":9199,"_id":9201,"_type":16,"title":9202,"_source":17,"_file":9203,"_stem":9204,"_extension":20},"/en-us/blog/using-omnibus-gitlab-to-ship-gitlab",{"title":9190,"description":9191,"ogTitle":9190,"ogDescription":9191,"noIndex":6,"ogImage":9192,"ogUrl":9193,"ogSiteName":692,"ogType":693,"canonicalUrls":9193,"schema":9194},"Using the Omnibus GitLab package to ship GitLab","Take a look at what kind of decisions we need to make on every release of GitLab and how omnibus-gitlab package fits into this process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684635/Blog/Hero%20Images/cat-in-the-box.jpg","https://about.gitlab.com/blog/using-omnibus-gitlab-to-ship-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using the Omnibus GitLab package to ship GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marin Jankovski\"}],\n        \"datePublished\": \"2016-03-21\",\n      }",{"title":9190,"description":9191,"authors":9196,"heroImage":9192,"date":9197,"body":9198,"category":14},[6528],"2016-03-21","\n\nTwo years ago we announced that [GitLab is now simple to install] to great\nacclaim. Since then, GitLab has grown to become an irreplaceable tool for\nmany professionals. Part of this success can certainly be credited to an easier\ninstallation process using the omnibus-gitlab packages. The packages, however,\nseem to have polarized people to either\n[love](https://twitter.com/invalidusrname/status/673862628125614080)\n[them](https://twitter.com/Merenon/status/692027386272047104) or\n[hate](https://twitter.com/phessler/status/672747920635109376)\n[them](https://twitter.com/jiphex/status/672746104103051265).\n\nLet's take a look at what kind of decisions we need to make on\nevery release of GitLab and how the omnibus-gitlab package fits into this process.\n\n\u003C!--more-->\n\n## Omnibus concept\n\nThe [Omnibus project] is the brainchild of Chef Inc.\nTheir product, Chef Server, was notoriously hard to install and configure. To\ntackle this issue, the Omnibus project was created. The idea behind it was to\nhave one binary package per supported OS that would install all required\ndependencies and allow configuration of each required component.\nThis meant that the end binary package would not be lean like the packages\nthat people usually encounter. 
In fact, it would be \"fat\",\nhence [the name Omnibus].\nThis also meant going against [the Unix design principles], which favor\n[composable components] as opposed to [monolithic software] (which reminds Unix\nusers too much of Windows software).\n\nThe concept is simple:\n\n* Have one binary package that contains all required components.\n* Make sure that a specific version of the package has all required components.\n* Make sure that the supplied components are of versions that are known to work\n  in a predictable manner with the other components.\n\n## Installing GitLab from source\n\nGitLab faces similar challenges.\n[Installation and upgrade guides] for what we call *installation from source*\nare available, but they are at least 10 pages long.\n\nThey show how to install and configure multiple dependencies and\nsystem users, which directories and files need to exist, which permissions\nthey need to have, and so on. Installing a wrong version of a dependency\nmeans that things might not work as expected, so it is imperative to follow the\nguide to the letter.\n\nUpgrading can be challenging, too. The update guide is shorter, but there are time constraints, and having everyone breathing down\nyour neck expecting everything to go smoothly makes the upgrade stressful.\n\n## Omnibus-gitlab\n\nEnter the omnibus-gitlab package.\n\nAnyone should be able to install and configure GitLab with minimal knowledge.\nGitLab should be available on the most widely-used Linux distributions.\n\nWe want the focus to be on GitLab and its features.\nInstallation and upgrades should be easy and almost enjoyable!\n\nThis is how omnibus-gitlab was born.\n\nBenefits for everyone:\n\n1. Users need to apply only minimal effort to install GitLab.\n1. Users need to provide only minimal configuration to get GitLab up and running.\n1. Users can easily upgrade between GitLab versions.\n1. Users are encouraged to upgrade to the latest version of GitLab, which is\nalways better than the previous one.\n\nBenefits for the maintainers of GitLab:\n\n1. We provide our users with only one binary package they need to\ninstall.\n1. We ship packages for multiple platforms at the same time.\n1. We make sure that the components that GitLab requires for a specific\nversion are shipped.\n1. We know that the components are running on versions that are compatible.\n1. It becomes easier to support any issues users have because we have a more\nconsistent environment.\n1. We maintain one project that covers all of the above.\n\nThe last point is very important for a project like GitLab.\n\nGitLab has a monthly release cycle. Every month on the 22nd we need to release\na new version. We usually follow up the release with a couple of patch releases\nto address any regressions that might have been introduced previously.\nGiven how important GitLab is to the development infrastructure, we need to be\nable to react quickly to any vulnerabilities in GitLab or any of its components.\n\n### A Silver Bullet?\n\nNot quite.\n\nThe omnibus-gitlab package does a lot for the end user, but because of that it\nmakes a lot of choices for the user. It will create the directories and files\non the file system and assume that it can do so.\nIt will create the system users. 
It will occupy the ports it needs.\nIt ships with its own UNIX init supervision system, runit.\nIt ships with libraries that may already exist on the system\n(albeit maybe of a different version).\n\nFor a very large portion of users all of the above won't matter, but there are\nenvironments which are highly restrictive.\nThe package has a lot of configuration to make it easier to adjust to the\nenvironment, but this can be a lot of work to get right.\nWe are always working on making the package even more customizable while\nassuming the best possible defaults for users who don't need to customize.\nHowever, it is a marathon rather than a sprint.\n\n### Alternatives to Omnibus we've considered\n\nWe are always evaluating the new options that become available.\nLet's take a look at a few of the options that we've already considered.\n\n#### Docker images\n\nTwo years ago Docker was still a very new project. It had problems like any new\nproject (like us!). The number of users using it in production is growing, but\nwe could not and cannot count on everyone supporting Docker in their\nenvironments. Introducing Docker into your environment adds another piece of\nsoftware that needs support, and not everyone can add this layer.\n\nPackages in the .deb and .rpm archive formats, on the other hand, are allowed in\nmost, if not all, environments.\n\nWe do release new [Docker] images on every release as an additional method of\ninstalling GitLab.\n\n#### Native Debian Packages\n\nUsers encouraging us to ship GitLab as a native Debian package usually say that\nthis would keep us in line with the Unix design principles and let us leverage\npackages that already exist on the system instead of reinventing the wheel! You\nmost likely already have openssl installed on your system, so why ship\nanother copy?\n\nLet's take a look at what that would entail:\n\n1. Packaging over 300 Ruby gems as separate packages. (This is Spartaaa!)\n1. If a component version we require does not exist in the system package\nrepository, tell the user to compile it.\n1. Do this at least once a month to be able to follow the monthly release.\n1. Make sure that any change that was created in GitLab by us or any of the\ncontributors does not break the package.\n\nNative packages are better suited to slower release cycles, which clashes\nwith the way GitLab does releases.\n\nWe also don't have enough expertise or a big enough team to do native packaging.\nIt is a lot of work, and we would need a dedicated team just for the packaging\nfor this specific platform.\n\nThere is some good news though!\nPirate Praveen has been working for the past 6-8 months on\n[native Debian packages].\nThe packages are almost ready to be included in the Debian package\nrepository.\n\nThis will allow all users who do not want the omnibus-gitlab package to touch\ntheir system to easily install GitLab.\n\nWe have yet to see how much of an effort it will be to release new\nversions, but this is something that will be announced once the packages are\nready.\n\n#### Native Fedora Packages\n\nThis case is pretty much the same as with the native Debian packages.\nThere was an attempt to package GitLab for Fedora as a part of the\n[Google Summer of Code project] by Achilleas Pipinellis, who has since become\na GitLab team-member. Through that effort, we learned it is a multi-person\njob and packaging alone is a lot of work. 
So, the project was never completed.\n\nIf you are interested in helping to create the native Fedora packages,\nyou can leave your comment in\n[this issue on the GitLab CE issue tracker](https://gitlab.com/gitlab-org/gitlab-ce/issues/14043).\n\n#### Anything else\n\nWe've been asked a few times why we don't just let developers configure GitLab\nwith Chef, Puppet, or Ansible.\n\nYou can still use your favourite configuration management tool to do this work.\nHowever, be advised that it is _still_ a lot of work. That also means that for\nevery GitLab update, the administrator needs to go through a list of changes\nand see if they need to upgrade the software. If they don't, GitLab might not\nwork as expected.\nThe end user most likely won't care how the setup is done; they might just see\nsomething not working as they would expect. That is a risk we want to remove if\nwe can.\n\n## Conclusion\n\nOne of GitLab's strengths is that we are able to have a very short release\ncycle, getting the updates to all our users very quickly.\nThe omnibus-gitlab packages aren't perfect but they are currently the best\noption for frequent GitLab updates.\n\nIf you consider the amount of time required to maintain eight packages\n(four platforms, one package each for CE and EE, two Docker images,\ntwo Raspberry Pi 2 packages),\nthe monthly release cycle, and making upgrades between versions and\ninstallations as simple as possible,\nthen omnibus-gitlab is doing a very good job.\n\n[A lot](https://twitter.com/choyer/status/670273120566120449)\n[of users](https://twitter.com/jrblier/status/613077041219399681)\n[that have been using](https://twitter.com/mickael_andrieu/status/646278424936480768)\nthe omnibus-gitlab packages\n[to maintain](https://twitter.com/invalidusrname/status/673862628125614080)\n[their GitLab installation](https://twitter.com/J_Salamin/status/687884326629937152)\n[seem to](https://twitter.com/alexzeitler_/status/692812151296282625)\n[agree with this](https://twitter.com/berkeleynerd/status/692093491149582339).\n\nWith the omnibus-gitlab packages available for everyone, we can work in parallel\nto create more ways to install GitLab.\n\nWant to help improve the omnibus-gitlab package? Contribute at the\n[omnibus-gitlab repository].\n\nWant to work on making GitLab available on your favourite platform but need\nsome feedback? 
Get in touch through the [GitLab CE issue tracker].\n\nAre you in New York on April 12th, 2016?\nAsk me a question at the [Software Architecture Conference] where I'll be\nspeaking about Shipping a Ruby on Rails stack to thousands of companies every\nmonth.\n\n\n[GitLab is now simple to install]: /blog/gitlab-is-now-simple-to-install/\n[Omnibus project]: https://github.com/chef/omnibus\n[the name omnibus]: https://en.wikipedia.org/wiki/Omnibus\n[the Unix design principles]: https://en.wikipedia.org/wiki/Unix_philosophy\n[composable components]: https://en.wikipedia.org/wiki/Composability\n[monolithic software]: https://en.wikipedia.org/wiki/Monolithic_application\n[Installation and upgrade guides]: http://doc.gitlab.com/ce/install/installation.html\n[Docker]: https://hub.docker.com/u/gitlab/\n[native Debian packages]: https://wiki.debian.org/gitlab\n[Google Summer of Code project]: https://fedoraproject.org/wiki/User:Axilleas/GitLab\n[omnibus-gitlab repository]: https://gitlab.com/gitlab-org/omnibus-gitlab\n[GitLab CE issue tracker]: https://gitlab.com/gitlab-org/gitlab-ce/issues\n[Software Architecture Conference]: http://conferences.oreilly.com/software-architecture/engineering-business-us/public/schedule/speaker/228210\n",{"slug":9200,"featured":6,"template":678},"using-omnibus-gitlab-to-ship-gitlab","content:en-us:blog:using-omnibus-gitlab-to-ship-gitlab.yml","Using Omnibus Gitlab To Ship Gitlab","en-us/blog/using-omnibus-gitlab-to-ship-gitlab.yml","en-us/blog/using-omnibus-gitlab-to-ship-gitlab",{"_path":9206,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9207,"content":9212,"config":9216,"_id":9218,"_type":16,"title":9219,"_source":17,"_file":9220,"_stem":9221,"_extension":20},"/en-us/blog/fast-search-using-postgresql-trigram",{"title":9208,"description":9209,"ogTitle":9208,"ogDescription":9209,"noIndex":6,"ogImage":2478,"ogUrl":9210,"ogSiteName":692,"ogType":693,"canonicalUrls":9210,"schema":9211},"Fast Search Using PostgreSQL Trigram Text Indexes","In this article we'll look at how these indexes work and how they can be used to speed up queries using LIKE conditions.","https://about.gitlab.com/blog/fast-search-using-postgresql-trigram-indexes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Fast Search Using PostgreSQL Trigram Text Indexes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yorick Peterse\"}],\n        \"datePublished\": \"2016-03-18\",\n      }",{"title":9208,"description":9209,"authors":9213,"heroImage":2478,"date":9214,"body":9215,"category":14},[6025],"2016-03-18","\nGitLab 8.6 will ship with improved search performance for PostgreSQL thanks to\nthe use of trigram indexes. In this article we'll look at how these indexes work\nand how they can be used to speed up queries using LIKE conditions.\n\n\u003C!--more-->\n\n## How to Use PostgreSQL Fast Search in GitLab?\n\nGitLab allows users to search for issues, comments, commits, code, merge\nrequests, and snippets. The traditional approach to developing a system for\nsearching data like this in an RDBMS is to simply use a LIKE condition. A LIKE\noperates on a string that specifies what to search for and optional percentage\nsigns acting as wildcards. For example, to match all values starting with\n\"Alice\" you'd use the string `'Alice%'`. If one wants to search all records in a\ntable (e.g. 
all users) they might write the following query:\n\n    SELECT *\n    FROM users\n    WHERE name LIKE 'Alice%';\n\nThe wildcards used for a LIKE condition can appear anywhere (and optionally\nmultiple times) in the string, so all these values are valid:\n\n* `'Alice%'`\n* `'%Alice'`\n* `'%Alice%'`\n* `'%Al%ice%'`\n\nHowever, wildcards being allowed poses a problem: index usage. When a wildcard\nappears only at the end of a string, both MySQL and PostgreSQL are able to\nuse any existing indexes. However, when a wildcard appears at the start of a\nstring, things become problematic. To better understand the problem, imagine you\nhave the following list of names:\n\n* Alice\n* Bob\n* Charlie\n* Eve\n* Emily\n\nWhen searching for any name containing \"li\", the only solution is to iterate over\nall values and check if each value contains the string \"li\". Because the value\ncan appear anywhere in the strings being searched, an index won't help: we'd still\nhave to compare every value one by one, with no way of reducing the set of rows\nto search through. This in turn can lead to very slow queries depending on the\namount of data to search through.\n\n## Postgres Full Text Search\n\nSince both MySQL and PostgreSQL provide full text searching capabilities, one\nsolution would be to use this instead of a regular LIKE condition. Sadly, neither\nimplementation is without its problems. Up until MySQL 5.6, full text\nsearch only worked on MyISAM tables, which in turn meant not being able to use\ntransactions. MySQL and PostgreSQL also use different syntax for searching\nand require different steps to set things up. For example, MySQL uses the\nfollowing syntax:\n\n    SELECT *\n    FROM users\n    WHERE MATCH (username) AGAINST ('yorick');\n\nPostgreSQL uses the following instead:\n\n    SELECT *\n    FROM users\n    WHERE to_tsvector('english', username) @@ to_tsquery('english', 'yorick');\n\nThe differences in syntax make the code more complex. On top of that,\nPostgreSQL full text search works best when the text vectors are stored in\nphysical columns with an index. This in turn means having to adjust all your\nqueries to use these columns instead of the regular ones, resulting in queries such\nas:\n\n    SELECT *\n    FROM users\n    WHERE username_tsvector @@ to_tsquery('english', 'yorick');\n\nThis assumes `username_tsvector` contains a text vector built from the data\nstored in the `username` column. To further complicate matters, you'd have to set\nup a stored procedure and database trigger to keep these text vector columns in\nsync with the ones containing the raw data.\n\nAnother problem with full text search is that words are broken up according to\nthe rules defined by the language of the text. For example, on PostgreSQL\nconverting \"Yorick Peterse\" to a text vector results in the values \"peters\" and\n\"yorick\". This means that searching for \"yorick\" or \"peterse\" _will_ match the\ndata, but searching for \"yor\" _will not_. To showcase this, we can run the\nfollowing query in PostgreSQL:\n\n    SELECT 1\n    WHERE to_tsvector('english', 'Yorick Peterse') @@ to_tsquery('english', 'peterse');\n\nHere `to_tsvector()` creates a text vector with English as the language and\n\"Yorick Peterse\" as the input. The `to_tsquery()` function in turn creates a\ntext search query with English as the language and \"peterse\" as the input.\n\nRunning this query will result in a single row being returned. 
On the other\nhand, this will return no rows:\n\n    SELECT 1\n    WHERE to_tsvector('english', 'Yorick Peterse') @@ to_tsquery('english', 'yor');\n\nThis is problematic when you don't know exactly what you're looking for, for\nexample when you're looking for a person but only know part of their first name.\n\nIn short, full text search is only really an option if you only support\nPostgreSQL or MySQL, as supporting both leads to a lot of unwanted complexity.\n\n## Trigram Indexes\n\nWhile MySQL offers no further solutions (that I know of), PostgreSQL has some\nextra tricks up its sleeve: trigram indexes. Trigram\nindexes work by breaking up text into [trigrams][trigrams]. Trigrams are basically\nwords broken up into sequences of three letters. For example, the trigrams for\n\"alice\" would be:\n\n    {ali, lic, ice}\n\nPostgreSQL supports trigram indexes and operations via the [pg_trgm][pg_trgm]\nextension. This extension adds a few functions, operators, and support for\ntrigram indexes (GIN or GiST indexes, to be exact). To see what kind of\ntrigrams PostgreSQL can produce, we can run the following query:\n\n    SELECT show_trgm('alice');\n\nThis will generate the trigrams for the string \"alice\", producing the following\noutput:\n\n                show_trgm\n    ---------------------------------\n     {\"  a\",\" al\",ali,\"ce \",ice,lic}\n\nA big benefit of this extension is that these trigram indexes can be used by\nLIKE and ILIKE conditions without having to change your queries or set up\ncomplex full text search systems. There are two requirements for this to work:\n\n1. The index created must be either a GIN or a GiST index. In GitLab's case we\n   went with GIN indexes, as they lead to better query timings (at the\n   cost of being larger and somewhat slower to build).\n2. The index must have the appropriate [operator class][opclass] set.\n\nIn the case of a GIN index the operator class we have to use is called\n`gin_trgm_ops`. Once the extension is enabled (`CREATE EXTENSION pg_trgm;`), we\ncan create the appropriate indexes using a query such as the\nfollowing:\n\n    CREATE INDEX CONCURRENTLY index_issues_on_title_trigram\n    ON issues\n    USING gin (title gin_trgm_ops);\n\nTo showcase the impact these indexes have on performance let's use the following\nquery as an example:\n\n    SELECT COUNT(*)\n    FROM users\n    WHERE username ILIKE '%yorick%';\n\nThis query counts the number of users whose username contains the string\n\"yorick\", regardless of casing. 
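The timings and query plans shown below are the kind of output you get from\nPostgreSQL's EXPLAIN ANALYZE, which executes a query and reports what the planner\ndid and how long each step actually took. If you want to take such measurements\nyourself, prefix the query with `EXPLAIN ANALYZE`:\n\n    EXPLAIN ANALYZE\n    SELECT COUNT(*)\n    FROM users\n    WHERE username ILIKE '%yorick%';\n\n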
Running this query on my local PostgreSQL\ndatabase takes around 160 milliseconds and produces the following query plan:\n\n     Aggregate  (cost=8143.40..8143.41 rows=1 width=0) (actual time=157.981..157.982 rows=1 loops=1)\n       ->  Index Only Scan using index_users_on_username on users  (cost=0.42..8143.34 rows=26 width=0) (actual time=155.153..157.974 rows=6 loops=1)\n             Filter: (username ~~* '%yorick%'::text)\n             Rows Removed by Filter: 257532\n             Heap Fetches: 0\n     Planning time: 0.143 ms\n     Execution time: 158.008 ms\n\nTo speed this up we'll run the following to create an index:\n\n    CREATE INDEX CONCURRENTLY index_users_on_username_trigram\n    ON users\n    USING gin (username gin_trgm_ops);\n\nIf we now re-run the query it takes only around 0.2 milliseconds and produces\nthe following query plan:\n\n     Aggregate  (cost=152.41..152.42 rows=1 width=0) (actual time=0.128..0.128 rows=1 loops=1)\n       ->  Bitmap Heap Scan on users  (cost=52.20..152.35 rows=26 width=0) (actual time=0.115..0.126 rows=6 loops=1)\n             Recheck Cond: (username ~~* '%yorick%'::text)\n             Heap Blocks: exact=6\n             ->  Bitmap Index Scan on index_users_on_username_trigram  (cost=0.00..52.19 rows=26 width=0) (actual time=0.106..0.106 rows=6 loops=1)\n                   Index Cond: (username ~~* '%yorick%'::text)\n     Planning time: 0.366 ms\n     Execution time: 0.167 ms\n\nIn other words, creating the trigram index results in the query being around\n946 times faster.\n\n## GitLab & Trigram Indexes\n\nGitLab 8.6 will create trigram indexes for PostgreSQL users leading to vastly\nimproved search performance (though there's still some work to be done in the\nfuture). To make this work (while still supporting MySQL) we did have to port\nover some changes from an open Rails pull request to ensure the indexes were\ndumped properly to `db/schema.rb`. These changes can be found in\n[config/initializers/postgresql_opclasses_support.rb][opclass-support] and were\ntaken from [Rails pull request #19090][rails-pr-19090].\n\nWe also had to make some changes to ensure MySQL doesn't end up trying to create\nthese indexes when loading the schema definition into a database. For example,\n`db/schema.rb` contains lines such as `add_index ..., using: :gin` and the\n`using` option is passed straight to the underlying database. Since MySQL\ndoesn't support GIN indexes this would lead to database errors when trying to\nload `db/schema.rb`. The code that makes this work can be found in\n[config/initializers/mysql_ignore_postgresql_options.rb][mysql-ignore-pg].\n\nFinally we made some small changes to the code to ensure queries automatically\nuse ILIKE on PostgreSQL instead of `lower(some_column)` as ILIKE performs quite\na bit better. 
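As a rough sketch of the difference, reusing the issues table from earlier (the\npattern `'%wildcard%'` is purely illustrative): the first form can not use the\ntrigram index, since that index covers the raw column rather than the lower()\nexpression, while the second form can.\n\n    -- Bypasses index_issues_on_title_trigram:\n    SELECT * FROM issues WHERE lower(title) LIKE '%wildcard%';\n\n    -- Can use index_issues_on_title_trigram:\n    SELECT * FROM issues WHERE title ILIKE '%wildcard%';\n\n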
On MySQL a regular LIKE is used as it's already case-insensitive.\n\nAll of the other details can be found in GitLab CE merge request [\"Refactor\nsearching and use PostgreSQL trigram indexes for significantly improved\nperformance\"][mr2987].\n\n[trigrams]: https://en.wikipedia.org/wiki/Trigram\n[pg_trgm]: http://www.postgresql.org/docs/current/static/pgtrgm.html\n[opclass]: http://www.postgresql.org/docs/current/static/indexes-opclass.html\n[mr2987]: https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2987\n[opclass-support]: https://gitlab.com/gitlab-org/gitlab-ce/blob/0602091f0cdebbc3183732dee78c38f89b4b7d01/config/initializers/postgresql_opclasses_support.rb\n[mysql-ignore-pg]: https://gitlab.com/gitlab-org/gitlab-ce/blob/0602091f0cdebbc3183732dee78c38f89b4b7d01/config/initializers/mysql_ignore_postgresql_options.rb\n[rails-pr-19090]: https://github.com/rails/rails/pull/19090\n",{"slug":9217,"featured":6,"template":678},"fast-search-using-postgresql-trigram","content:en-us:blog:fast-search-using-postgresql-trigram.yml","Fast Search Using Postgresql Trigram","en-us/blog/fast-search-using-postgresql-trigram.yml","en-us/blog/fast-search-using-postgresql-trigram",{"_path":9223,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9224,"content":9229,"config":9233,"_id":9235,"_type":16,"title":9236,"_source":17,"_file":9237,"_stem":9238,"_extension":20},"/en-us/blog/setting-up-gitlab-ci-for-ios-projects",{"title":9225,"description":9226,"ogTitle":9225,"ogDescription":9226,"noIndex":6,"ogImage":9174,"ogUrl":9227,"ogSiteName":692,"ogType":693,"canonicalUrls":9227,"schema":9228},"Setting up GitLab CI for iOS projects","Learn how to set up GitLab CI for your iOS projects.","https://about.gitlab.com/blog/setting-up-gitlab-ci-for-ios-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Setting up GitLab CI for iOS projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Angelo Stavrow\"}],\n        \"datePublished\": \"2016-03-10\",\n      }",{"title":9225,"description":9226,"authors":9230,"heroImage":9174,"date":9231,"body":9232,"category":14},[8778],"2016-03-10","\n\n_Note: This blog post was published in 2016. For more current info, check out [Tutorial: iOS CI/CD with GitLab](https://about.gitlab.com/blog/ios-cicd-with-gitlab/) from June 2023 and our [documentation on GitLab Mobile DevOps](https://docs.gitlab.com/ee/ci/mobile_devops.html). Those both cover fastlane, fastlane match, code signing, signing certificates, provisioning profiles, App Store Connect, and more._\n\nIn this post I'll show you how to set up GitLab CI for your iOS mobile projects,\nstep-by-step, from start to finish.\n\n\u003C!--more-->\n\n### Why CI?\n\n[Continuous integration](/topics/ci-cd/) (CI) is a great tool for helping developers be more productive and write\nhigher-quality code. By automatically running a suite of tests every time a commit is\npushed, everyone can see the results of changes to the codebase, and take action\nto make integration faster and easier.\n\nGitLab [comes with CI built-in](/solutions/continuous-integration/) for all\nprojects, for free.\n\nIt's beyond the scope of this tutorial to go into details on best practices,\nworkflows, and advantages/disadvantages of CI. In short, however, here's what\nhappens when you enable it for your Xcode project:\n\n1. You make changes to your copy of the codebase and push a commit to GitLab.\n2. GitLab recognizes that the codebase has changed.\n3. 
GitLab triggers a build with the GitLab Runner you set up on your Mac for the project.\n4. The GitLab Runner runs through the build and test process you specified in the `.gitlab-ci.yml` configuration file.\n5. The GitLab Runner reports its results back to GitLab.\n6. GitLab shows you the results of the build.\n\nThis post builds on [Jeremy White's blog post](http://www.thejeremywhite.com/blog/xcode-gitlab-ci-setup.html),\ngoing into a little more detail and correcting some steps for the environment\ndescribed in the next section.\n\n### Assumptions and environment\n\nThis post will provide a step-by-step guide to setting up GitLab CI for your iOS\nprojects, from start to finish. First, however, we need to make a few assumptions.\n\n[GitLab's strategy document](/company/strategy/) hinges on one\nkey idea: _everyone can contribute_. As such, this post is written for readers\nof nearly all levels of experience. However, given that CI is a relatively\nadvanced topic, we're going to assume some basic knowledge of how to create\nXcode and GitLab projects, as well as some familiarity with Terminal and git.\n\nThis post was written with the following development environment in mind:\n\n- A Mac running macOS 10.11.3 \"El Capitan\"\n- Xcode 7.2.1 with command-line tools and the iOS 9.2 SDK installed\n- GitLab.com v8.5\n\nWe'll also assume you've already created a new GitLab project. If you haven't,\ngo ahead and do that now.\n\n### Setting up your Xcode project\n\nWe'll start by creating a new single-view iOS project in Xcode.\n\n![Creating a new Xcode project.](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/1_create-new-xcode-project.png)\n\nGive your project a name and make certain that the **Include Unit Tests** and\n**Include UI Tests** options are enabled for the project. Xcode will create a\ntemplate test class with some sample tests, which we'll use in this post as the\ntest suite that GitLab CI runs to verify a build. Then click on **Next**.\n\n![Enable unit and UI tests in your project](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/2_enable-unit-tests.png)\n\nChoose where you'll save your iOS project. If you like, let Xcode create the git\nrepository on your Mac.\n\n![Let Xcode initialize your git repository.](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/3_create-git-repository.png)\n\nOnce Xcode has created and opened your iOS project, you need to [share its scheme](https://developer.apple.com/library/ios/recipes/xcode_help-scheme_editor/Articles/SchemeShare.html). Apple's [documentation](https://developer.apple.com/library/ios/recipes/xcode_help-scheme_editor/Articles/SchemeDialog.html) defines schemes nicely:\n\n> A scheme is a collection of settings that specify which targets to build, what build configuration to use, and the executable environment to use when the product specified by the target is launched.\n\nBy sharing your scheme, GitLab CI gets the context it needs to build and test your project.\n\nTo share a scheme in Xcode, choose **Product** > **Scheme** > **Manage Schemes**, then check the **Shared** box next to your scheme.\n\n![Share your scheme.](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/4_share-xcode-scheme.png)\n\nClick on the **Close** button.\n\nYour Xcode project has been created with two test files; one includes sample unit\ntests, and the other includes sample UI tests. To run these tests, choose **Product** > **Test**;\nthis will build your project, launch the Simulator, install\nthe project on the Simulator device, and run the test suite. You can see the\nresults right in Xcode:\n\n![Test suite success in Xcode.](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/5_test-suite-success-in-xcode.png)\n\nThe green checkmarks next to the test functions (both in the file, and in the\nTest navigator) show that all tests passed. 
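Incidentally, GitLab CI will later drive this same build-and-test cycle from the\ncommand line with `xcodebuild`. If you'd like a preview, you can run the\nequivalent command yourself from the project folder (a sketch&mdash;substitute\nyour own project and scheme names):\n\n```\nxcodebuild test -project ProjectName.xcodeproj -scheme SchemeName -destination 'platform=iOS Simulator,name=iPhone 6s,OS=9.2'\n```\n\n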
We won't be referring to the Xcode\nproject anymore, so if you like, you can close it.\n\nNext, open Terminal and navigate to the folder you created for your iOS project.\n\nIt's convenient to add a standard `.gitignore` file. For a Swift project, enter:\n\n```\n$ curl -o .gitignore https://www.toptal.com/developers/gitignore/api/swift\n```\n\nFor an Objective-C project, enter:\n\n```\n$ curl -o .gitignore https://www.gitignore.io/api/objective-c\n```\n\nThe `curl` command conveniently downloads the contents of the page at the given [gitignore.io](https://gitignore.io) URL into a file named `.gitignore`.\n\nIf Xcode initialized the git repository for you, you'll need to set the origin\nURL to your GitLab project (replacing `\u003Cusername>` with your GitLab username\nand `\u003Cproject>` with the project name):\n\n```\n$ git remote add origin git@gitlab.com:\u003Cusername>/\u003Cproject>.git\n```\n\nThe final step here is to [install xcpretty](https://github.com/supermarin/xcpretty).\nWhen Xcode builds and tests your project, xcpretty will transform the output into\nsomething more readable for you.\n\n### Installing and registering the GitLab Runner\n\nThe GitLab Runner is a service that's installed on your Mac, which runs the build\nand test process that you set up in a configuration file. You can follow the\n[installation instructions for macOS](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/install/osx.md),\nbut we'll need to make some changes to the _register the runner_ step:\n\n```\n$ gitlab-ci-multi-runner register\nWARNING: Running in user-mode.                     \nWARNING: The user-mode requires you to manually start builds processing:\nWARNING: $ gitlab-runner run                       \nWARNING: Use sudo for system-mode:                 \nWARNING: $ sudo gitlab-runner...                   \n```\n\nIf you're using self-managed GitLab, the coordinator URL will be `http(s)://url-of-your-gitlab-instance/ci`.\n\n```\nPlease enter the gitlab-ci coordinator URL (e.g. https://gitlab.com/ci):\nhttps://gitlab.com/ci\n```\n\nThe CI token for your project is available on GitLab's Project Settings page,\nunder _Advanced Settings_. Each project has a unique token.\n\n```\nPlease enter the gitlab-ci token for this runner:\n\u003CCI runner token from Project > Settings > Runner>\n```\n\nThe `register` process suggests the name of your Mac as a description for the\nrunner. You can enter something different if you like, or just hit **return** to\ncontinue.\n\n```\nPlease enter the gitlab-ci description for this runner:\n[Your-Mac's-Name.local]:\n```\n\nEnter whatever tags you'd like to further identify this particular runner. Tags are\nespecially helpful when you need a particular build environment&mdash;for example,\niOS 9.2 on Xcode 7.2 on macOS 10.11 could use tags like `ios_9-2`, `xcode_7-2`,\nand `osx_10-11`. 
This way, we can filter our build stages in GitLab by toolchain,\nplatform, etc.\n\n```\nPlease enter the gitlab-ci tags for this runner (comma separated):\nios_9-2, xcode_7-2, osx_10-11\n```\n\nThe GitLab Runner will register the runner and give it a unique `runner` ID.\n\n```\nRegistering runner... succeeded                     runner=s8Bgtktb\n```\n\nThe GitLab Runner has to run `xcodebuild` to build and test the project, so we\nselect `shell` as the executor:\n\n```\nPlease enter the executor: virtualbox, ssh, shell, parallels, docker, docker-ssh:\nshell\nRunner registered successfully. Feel free to start it, but if it's running\nalready the config should be automatically reloaded!\n```\n\nContinue with the rest of the Runner installation instructions (`install` and\n`start`), per the documentation.\n\nGo to the _Runners_ page in your Project Settings and voilà:\n\n![The GitLab Runner is recognized in GitLab's Project Settings.](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/6_runner-registered.png)\n\nYour GitLab Runner is recognized and (almost) ready to go!\n\nYou can verify this by running\n\n```\n$ gitlab-ci-multi-runner verify\nWARNING: Running in user-mode.                     \nWARNING: The user-mode requires you to manually start builds processing:\nWARNING: $ gitlab-runner run                       \nWARNING: Use sudo for system-mode:                 \nWARNING: $ sudo gitlab-runner...                   \n\nVeryfing runner... is alive                         runner=25c780b3\n```\n\nNote that they have the same ID (in this case, `25c780b3`).\n\nThe last thing to do is to configure the build and test settings. To do so, open\nyour text editor and enter the following:\n\n```\nstages:\n  - build\n\nbuild_project:\n  stage: build\n  script:\n    - xcodebuild clean -project ProjectName.xcodeproj -scheme SchemeName | xcpretty\n    - xcodebuild test -project ProjectName.xcodeproj -scheme SchemeName -destination 'platform=iOS Simulator,name=iPhone 6s,OS=9.2' | xcpretty -s\n  tags:\n    - ios_9-2\n    - xcode_7-2\n    - osx_10-11\n```\n\nSave this file in your Xcode project folder as `.gitlab-ci.yml`, and don't forget\nthe period at the beginning of the file name!\n\n> **Update:** To clarify, the `.gitlab-ci.yml` file should go in the folder you created for your iOS project, which is also typically where your Xcode project file (`ProjectName.xcodeproj`) is found. Thanks to commenter Palo for pointing this out!\n\nLet's go through the file with some detail:\n\n- The file first describes the `stages` available to each `job`. For simplicity,\nwe have one stage (`build`) and one job (`build_project`).\n- The file then provides the settings for each `job`. The `build_project` job runs\ntwo scripts: one to clean the Xcode project, and then another to build and test\nit. 
You can probably skip the cleaning script to save time, unless you want to be\nsure that you're building from a clean state.\n- Under `tags`, add the tags you created when you registered the GitLab Runner.\n\nThere are also some things to look out for:\n\n- Make sure to replace all references to `ProjectName` with the name of your\nXcode project; if you're using a different scheme than the default, then make\nsure you pass in the proper `SchemeName` too (the default is the same as the\n`ProjectName`).\n- In the `xcodebuild test` command, notice the `-destination` option is set to\nlaunch an iPhone 6S image running iOS 9.2 in the Simulator; if you want to run a\ndifferent device (iPad, for example), you'll need to change this.\n- If you're using a workspace rather than a project (e.g., because your app uses [Cocoapods](https://cocoapods.org)), change the `-project ProjectName.xcodeproj`\noptions to `-workspace WorkspaceName.xcworkspace`. There are several options\navailable to customize your build; run `xcodebuild --help` in the Terminal to\nexplore these further.\n\nThere's a simple tool for \"linting\" (i.e., validating) your `.gitlab-ci.yml` in\nGitLab. From your GitLab project page, click on CI/CD > Jobs in the sidebar, then in\nthe upper-right corner, click on **CI lint**:\n\n![Accessing the GitLab CI script linter.](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/7_ci-lint-button.png)\n\nPaste the contents of your `.gitlab-ci.yml` file into the text box and click on\n**Validate**. You should see something like:\n\n> **Status:** syntax is correct\n\nThis won't tell you if your project name or the Simulator chosen is correct, so\nbe sure to double-check these settings.\n\nThe `.gitlab-ci.yml` file is extremely customizable. You can limit jobs to run\non success or failure, or depending on branches or tags, etc.&mdash;read through\n[the documentation](http://doc.gitlab.com/ce/ci/yaml/README.html) to get a feeling\nfor just how flexible and powerful it is.\n\n### Setting up your GitLab project for CI\n\nActually, there's really not much to do here! CI is enabled by default on new\nprojects. If your iOS project has some environment variables you want to keep\nsecret, but you want to keep the project public on GitLab, you may want to disable\n**Public builds** in Project Settings, under _Continuous Integration_. This will\nhide the build results from everyone except members of the project.\n\nYou may also want to go to _Runners_ under your Project Settings and click\n**Disable shared runners**, as they're not needed anyhow&mdash;we're using a\nproject-specific runner.\n\nWe're now ready to trigger a CI build!\n\n### How to trigger builds\n\nTo trigger a build, all you have to do is push a commit to GitLab. 
From the Terminal:\n\n```\n$ git add .\n$ git commit -m \"First commit.\"\n[...commit info...]\n$ git push origin master\n[...push info...]\n```\n\nIf everything worked, and you installed the GitLab Runner on the same machine,\nyou'll notice that Simulator starts, installs your iOS app, launches it, and\nthen goes back to the home screen.\n\nGo to the *Builds* page of your GitLab project and have a look at the results!\n\n![The Build page after your first CI build.](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/8_build-page-success.png)\n\nClick on the \u003Cspan style=color:green>✔︎ success\u003C/span> button to see the build output:\n\n![The build results page.](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/9_build-results.png)\n\nHere you'll see the output from all the steps you requested in your `.gitlab-ci.yml`\nfile. At the bottom of the log, you should see something like:\n\n```\nAll tests\nTest Suite GitLab-CI-for-iOSTests.xctest started\nGitLab_CI_for_iOSTests\n    . testExample (0.001 seconds)\n    T testPerformanceExample measured (0.000 seconds)\n    . testPerformanceExample (0.324 seconds)\n\n\n\t Executed 2 tests, with 0 failures (0 unexpected) in 0.325 (0.328) seconds\n\nAll tests\nTest Suite GitLab-CI-for-iOSUITests.xctest started\nGitLab_CI_for_iOSUITests\n    . testExample (3.587 seconds)\n\n\n\t Executed 1 test, with 0 failures (0 unexpected) in 3.587 (3.589) seconds\n\n\nBuild succeeded.\n```\n\nNow you can go ahead and start writing tests for your code, and every time you\npush a commit, GitLab will diligently fetch the project, clean it, and then build\nand test it. If the build fails, you can take action to fix the commit.\n\n### Starting and stopping the runner on your Mac\n\nThe GitLab Runner includes several convenient commands, which you can list easily:\n\n```\n$ gitlab-ci-multi-runner --help\nNAME:\n   gitlab-ci-multi-runner - a GitLab Runner\n\nUSAGE:\n   gitlab-ci-multi-runner [global options] command [command options] [arguments...]\n\nVERSION:\n   1.0.4 (014aa8c)\n\nAUTHOR(S):\n   Kamil Trzciński \u003Cayufan@ayufan.eu>\n\nCOMMANDS:\n   archive\tfind and archive files (internal)\n   artifacts\tupload build artifacts (internal)\n   extract\textract files from an archive (internal)\n   exec\t\texecute a build locally\n   list\t\tList all configured runners\n   run\t\trun multi runner service\n   register\tregister a new runner\n   install\tinstall service\n   uninstall\tuninstall service\n   start\tstart service\n   stop\t\tstop service\n   restart\trestart service\n   status\tget status of a service\n   run-single\tstart single runner\n   unregister\tunregister specific runner\n   verify\tverify all registered runners\n   help, h\tShows a list of commands or help for one command\n\nGLOBAL OPTIONS:\n   --debug\t\t\tdebug mode [$DEBUG]\n   --log-level, -l \"info\"\tLog level (options: debug, info, warn, error, fatal, panic)\n   --help, -h\t\t\tshow help\n   --version, -v\t\tprint the version\n```\n\nYou may want to stop the Runner so that a build isn't immediately triggered by a\npushed commit:\n\n```\n$ gitlab-ci-multi-runner stop\n$ gitlab-ci-multi-runner status\ngitlab-runner: Service is not running.\n```\n\nIn this case, any builds pushed will show up as **pending** and will be triggered\nas soon as you restart the Runner:\n\n```\n$ gitlab-ci-multi-runner start\n$ gitlab-ci-multi-runner status\ngitlab-runner: Service is running!\n```\n\nAny pending builds in the queue will then be triggered, launching Simulator and\nrunning the test suite normally.\n\n
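One command from that list worth calling out is `exec`, which runs a job from\nyour `.gitlab-ci.yml` locally without pushing a commit&mdash;handy for debugging\nthe configuration. A sketch, using the job we defined earlier:\n\n```\n$ gitlab-ci-multi-runner exec shell build_project\n```\n\nKeep in mind this runs the job with the shell executor on your own machine, so\nthe results can differ slightly from a real CI build.\n\n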
### Advanced: archiving the project automatically\n\nLet's say that, if we commit to the `master` branch, we want GitLab CI to not only build and\ntest the project, but also provide some continuous delivery by creating an\napplication archive and uploading it to GitLab.\n\nWe start by modifying our `.gitlab-ci.yml` file to add an `archive` stage and an\n`archive_project` job:\n\n```\nstages:\n  - build\n  - archive\n\nbuild_project:\n  stage: build\n  script:\n    - xcodebuild clean -project ProjectName.xcodeproj -scheme SchemeName | xcpretty\n    - xcodebuild test -project ProjectName.xcodeproj -scheme SchemeName -destination 'platform=iOS Simulator,name=iPhone 6s,OS=9.2' | xcpretty -s\n  tags:\n    - ios_9-2\n    - xcode_7-2\n    - osx_10-11\n\narchive_project:\n  stage: archive\n  script:\n    - xcodebuild clean archive -archivePath build/ProjectName -scheme SchemeName\n    - xcodebuild -exportArchive -exportFormat ipa -archivePath \"build/ProjectName.xcarchive\" -exportPath \"build/ProjectName.ipa\" -exportProvisioningProfile \"ProvisioningProfileName\"\n  only:\n    - master\n  artifacts:\n    paths:\n    - build/ProjectName.ipa\n  tags:\n    - ios_9-2\n    - xcode_7-2\n    - osx_10-11\n```\n\nThe `archive_project` job runs two scripts: the first cleans and archives your\nXcode project, and the second builds an `.ipa` file; the `only` setting means that\nthis job will only run when we commit to master. Notice that it also defines\n`artifacts`; after the `ProjectName.ipa` application archive is created, this\noption uploads it to GitLab, where you can later download it from the *Build* page.\n\nIn the `archive_project` job's `exportArchive` script, make sure you pass in the\ncorrect `ProvisioningProfileName`. It's possible that the `archive_project` job\nwill fail if your developer keys are in the login keychain, because that keychain\nisn't unlocked in the script. The simplest way to fix this without putting your\npassword in a script is to open Keychain Access on your Mac and drag and drop\nyour developer keys into the System keychain.\n\nNow, when we commit to master, the build will also show us the archive results,\nalong with the option to download or browse our build artifacts!\n\n![The archive results page.](https://about.gitlab.com/images/blogimages/setting-up-gitlab-for-ios-projects/10_archive-results.png)\n\n### Other salient points\n\n- This workflow should work for *any* kind of Xcode project, including tvOS,\nwatchOS, and macOS. Just be sure to specify the appropriate Simulator device in\nyour `.gitlab-ci.yml` file.\n- If you want to push a commit but don't want to trigger a CI build, simply add\n`[ci skip]` to your commit message.\n- If the user that installed the GitLab Runner isn't logged in, the runner won't\nrun. So, if builds seem to be pending for a long time, you may want to check on\nthis!\n- If you're working on a team, or if your project is public, you may want to install\nthe GitLab Runner on a dedicated build machine. It can otherwise be very distracting\nto be using your machine and have Simulator launch unexpectedly to run a test suite.\n- The test project used in this particular tutorial is [available here](https://gitlab.com/AngeloStavrow/gitlab-ci-for-ios-projects),\nbut the Runner is permanently stopped. 
Note that the project isn't tied to a particular\nteam, so provisioning isn't an issue here; in fact, *no* provisioning profile is specified.\nYou, however, may need to [add some parameters to the build scripts](https://coderwall.com/p/rv2lgw/use-xcodebuild-to-build-workspace-vs-project)\nin your `.gitlab-ci.yml` file if you see provisioning errors in your build output.\n\n## About guest author Angelo Stavrow\n\n[Angelo](http://angelostavrow.com) is a Quality Engineer and Software Developer\nliving in Montreal, Canada. He believes that open, honest collaboration is the\nbest path towards building great things *and* great teams.\n\n",{"slug":9234,"featured":6,"template":678},"setting-up-gitlab-ci-for-ios-projects","content:en-us:blog:setting-up-gitlab-ci-for-ios-projects.yml","Setting Up Gitlab Ci For Ios Projects","en-us/blog/setting-up-gitlab-ci-for-ios-projects.yml","en-us/blog/setting-up-gitlab-ci-for-ios-projects",{"_path":9240,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9241,"content":9247,"config":9252,"_id":9254,"_type":16,"title":9255,"_source":17,"_file":9256,"_stem":9257,"_extension":20},"/en-us/blog/gitlab-tutorial-its-all-connected",{"title":9242,"description":9243,"ogTitle":9242,"ogDescription":9243,"noIndex":6,"ogImage":9244,"ogUrl":9245,"ogSiteName":692,"ogType":693,"canonicalUrls":9245,"schema":9246},"Tutorial: It's all connected in GitLab","In GitLab, everything you do can be cross-linked and referenced. This improves discoverability and reduces duplicate effort.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683732/Blog/Hero%20Images/stars.png","https://about.gitlab.com/blog/gitlab-tutorial-its-all-connected","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: It's all connected in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather McNamee\"}],\n        \"datePublished\": \"2016-03-08\",\n      }",{"title":9242,"description":9243,"authors":9248,"heroImage":9244,"date":9250,"body":9251,"category":14},[9249],"Heather McNamee","2016-03-08","\n\nIn GitLab, everything you do can be cross-linked and referenced.\nThis improves discoverability and reduces duplicate effort.\n\nGitLab is more than just a Git repository manager.\nThere are a number of tools to help you collaborate with others, or even\njust manage a project yourself.\nThe best features of GitLab help you link and reference related work.\n\n\u003C!-- more -->\n\n## Your issue is the Single Source of Truth\n\nAs mentioned in my previous post, [always start with an issue][start-issue].\n\nThis way you have everything you need to know about the issue in one place.\nYou can use [GitLab Flavored Markdown] to format content in the issue.\n\nLater, as you make decisions, keep the issue description updated as the \"Single\nSource of Truth\", particularly when you hand off the issue and assign it to someone else.\nWe use this as a [guideline in our own handbook about our workflow][ssot].\n\n> When reassigning an issue, make sure that the issue body contains the\nlatest information. The issue body should be the single source of truth.\n\nLink directly to other issues and even comments to identify where you have\nmade decisions. 
The links will get formatted to be more readable.\n\n![Link to comments](https://about.gitlab.com/images/blogimages/connect-commentlink.png)\n\n## How to reference what's related in issues and MRs\n\nAdd [references] in GitLab issues, merge request descriptions, or in comments on issues or MRs.\nThis will update the issue with info about anything related.\n\n- To reference an issue: `#123`\n- To reference an MR: `!123`\n- To reference a snippet: `$123`\n\nYou can make all the same [references] in comments or merge requests as you can\nin an issue description.\n\nThe purpose of the [WIP] (work in progress) merge request is to encourage early code\nreview, so you can share your work with others.\nIn this way you can reference related work, and hopefully reduce duplicate effort.\n\n## How to add labels and milestones at a glance\n\nAdd [labels] for categorization.\nLabels can have a description and a color.\n\nAdd a [milestone] to set a target date. A milestone is often used for releases,\nbut you can also use milestones to create a simple workflow.\n\nThe [milestone view][milestone-mr] lets you see the status of issues at a glance, and you can\nsee the labels in this view.\n\n![Milestone display of issue](https://about.gitlab.com/images/blogimages/connect-milestone.png)\n\nThe same goes for the display of merge requests tagged under a milestone.\n\n![Milestone display of MRs](https://about.gitlab.com/images/blogimages/connect-milestonemr.png)\n\nYou can also view [milestones across an entire group][group-milestones].\n\n## How to add references in commits and code review\n\nSimilar to references on merge requests, you can add references in commit messages. For example, you could reference an issue ID or URL (in GitLab or Jira) for anyone who could use more context.  \nFurther details are in the documentation: [How to write a good commit message](https://docs.gitlab.com/ee/topics/gitlab_flow.html#how-to-write-a-good-commit-message)\n\n> `git commit -m \"Awesome commit message (Fixes #21 and Closes group/otherproject#22)\"`\n\nYour colleague can provide line-by-line code review.\nCommits also have a comment thread available for discussion,\nand the rules of referencing apply.\nYou can also link directly to lines of code in comments.\n\n## @Mention others and assign\n\nGitLab is a platform for collaborating.\nYou can mention people with @username and ask them to provide feedback in\nmerge requests or issues.\nYou can also assign someone to an issue or MR.\n\nIn either case, the user will see a notice pop up on their [Todos] list.\n\n![Todos](https://about.gitlab.com/images/blogimages/todos-screenshot.jpg)\n\nWhen an issue is assigned in GitLab, it's assumed someone is working on it,\nand it appears as \"Ongoing\": open and assigned.\nMake sure to unassign yourself when you are *not* working on an issue;\nthis makes it more likely that the issue will be picked up.\n\nThere’s always a single person responsible for working on an MR at any one time.\nUsually the person who is assigned to the MR is responsible for making any\namendments suggested in review.\n\nEE has a feature for approving merge requests, but there's still one\nperson who presses the final merge button.\nHence, only one person can be assigned at a time.\n\n## What will this merge request resolve? 
Close issues automatically.\n\nIn your merge request description add \"Closes #123\" or \"Fixes #123\".\nWhen you merge that branch, GitLab will close the issues you\nreferenced with Fixes #ID or Closes #ID.\n\n![Close when the MR is merged](https://about.gitlab.com/images/blogimages/connect-relatedmr.png)\n\nYou can use all the collaborative tools we have at the point of a merge request.\nSo you can add all the same references, categorization, and comments that you\ncan in issues.\nThere's a key difference. Merge requests generally resolve issues.\nAdding that reference means GitLab can take care of tidying up your issue\ntracker when you're done.\n\n[*Merge when build succeeds*][merge-succeeds] is not just a timesaver, it\nalso helps keep your momentum going.\n\nAfter merging you may want to turn your mind to the next project.\nOr you may simply need a break!\nYou don't want to come back and have to remember to check and then merge,\nand then have to remember to close related issues.\nLet GitLab do that for you.\n\n## Video tutorial: GitLab workflow\n\nIn [this video tutorial](https://www.youtube.com/watch?v=enMumwvLAug), we dig into the GitLab workflow outlined above. This will take you through the steps of making an issue, merge requests, and using tools in GitLab for cross-referencing and keeping your issue tracker organized with labels and milestones.\n\n[merge-succeeds]: http://doc.gitlab.com/ce/workflow/merge_when_build_succeeds.html\n[milestone-mr]: https://gitlab.com/gitlab-org/gitlab-ce/milestones/22\n[group-milestones]: https://gitlab.com/groups/gitlab-org/issues?milestone_title=8.6\n[our monthly release issue]: http://doc.gitlab.com/ee/release/monthly.html#create-an-overall-issue-and-follow-it\n[create-project]: https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project\n[ssot]: /handbook/communication/#gitlab-workflow\n[start-issue]: /blog/start-with-an-issue/\n[webcast]: http://page.gitlab.com/mar-2016-gitlab-introduction.html\n[glf-commit]: http://doc.gitlab.com/ee/workflow/gitlab_flow.html#committing-often-and-with-the-right-message\n[GitLab Flow]: http://doc.gitlab.com/ee/workflow/gitlab_flow.html\n[Patricio-tips]: /2015/02/19/8-tips-to-help-you-work-better-with-git/\n[Todos]: http://doc.gitlab.com/ce/workflow/todos.html\n[WIP]: http://doc.gitlab.com/ce/workflow/wip_merge_requests.html\n[gitlab-ui]: /2016/02/10/feature-highlight-create-files-and-directories-from-files-page/\n[GitLab Flavored Markdown]: http://doc.gitlab.com/ce/markdown/markdown.html\n[task lists]: http://doc.gitlab.com/ce/markdown/markdown.html#task-lists\n[labels]: http://doc.gitlab.com/ce/workflow/labels.html#sts=Labels\n[milestone]: http://doc.gitlab.com/ce/workflow/milestones.html\n[references]: https://docs.gitlab.com/ee/user/markdown.html#special-gitlab-references\n",{"slug":9253,"featured":6,"template":678},"gitlab-tutorial-its-all-connected","content:en-us:blog:gitlab-tutorial-its-all-connected.yml","Gitlab Tutorial Its All Connected","en-us/blog/gitlab-tutorial-its-all-connected.yml","en-us/blog/gitlab-tutorial-its-all-connected",{"_path":9259,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9260,"content":9265,"config":9270,"_id":9272,"_type":16,"title":9273,"_source":17,"_file":9274,"_stem":9275,"_extension":20},"/en-us/blog/gitlab-runner-with-docker",{"title":9261,"description":9262,"ogTitle":9261,"ogDescription":9262,"noIndex":6,"ogImage":2478,"ogUrl":9263,"ogSiteName":692,"ogType":693,"canonicalUrls":9263,"schema":9264},"Setting up GitLab Runner For Continuous 
Integration","This tutorial will demonstrate how to get started with a CI workflow using GitLab Runner.","https://about.gitlab.com/blog/gitlab-runner-with-docker","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Setting up GitLab Runner For Continuous Integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ahmet Kizilay\"}],\n        \"datePublished\": \"2016-03-01\",\n      }",{"title":9261,"description":9262,"authors":9266,"heroImage":2478,"date":9268,"body":9269,"category":14},[9267],"Ahmet Kizilay","2016-03-01","\n\nThere are many cloud-based [continuous integration (CI)](/solutions/continuous-integration/) providers out there and\nmost of them generously offer free plans for open-source projects.\nWhile this is great for the open-source community, paid plans and tiers can get a little\nbit too expensive for small start-ups that would prefer to keep their source code private.\nIn such an ecosystem, GitLab Inc. stands out as a viable option with unlimited private\nrepositories and its GitLab Runner, a free and open-source tool to automate the\ntesting and building of projects, thus giving software\ndevelopers the freedom to experiment with different approaches to build the\noptimal pipeline for their needs.\n\n## How to get started with a CI workflow using GitLab Runner\n\nThis tutorial will demonstrate how to get started with a CI workflow using\n[GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner) and its built-in Docker executor.\nWe will first set up a sample NodeJS project hosted on Gitlab.com to run tests\non build machines provided by GitLab Inc.\nThen, we will set up and configure our own specific runner on a private server.\nFinally, we will go over some good practices to speed up the build time as the project grows.\nBy the end of this tutorial you will feel comfortable building your own\nCI solution, custom-tailored for your existing projects.\nThis post will be useful for project managers looking for affordable\nCI solutions and developers wanting to build test-driven\nand sustainable software projects.\n\n\u003C!-- more -->\n\n## Introducing the sample project\n\nBefore we start with the GitLab Runner, let's briefly review this simple\n[NodeJS project](https://gitlab.com/gitlab-examples/nodejs/) we will\nwork with throughout this tutorial.\nOur project contains two independent modules we would like to test.\nOne module consists of some utility methods for asynchronous operations.\nThe other module implements a simple wrapper around a PostgreSQL database to\ninsert and retrieve records. 
Thanks to the latter module, we will need a\ndatabase instance in our testing environment to run the tests.\n\nAfter starting a test database, we can run the tests locally and see all tests pass.\n\n```\nDB_USER=[db-username] \\\nBD_PASS=[db-password] \\\nDB_HOST=[db-host:db-port] \\\nnode ./specs/start.js\n```\n\nFeel free to explore the project [source code](https://gitlab.com/gitlab-examples/nodejs/tree/master)\nif you are interested.\n\n## How to get started with GitLab Runner\n\nGitLab Runner is triggered with every push to the central repository or branch if a\n`.gitlab-ci.yml` file is present (unless explicitly configured not to).\nThis file specifies how the build environment should be set up and what\ncommands to be executed to build, test, and deploy our project in a series of\njobs that can be parallelized.\n\n## How to use Docker executor\n\nIn this tutorial, we will be using GitLab Runner's\nbuilt-in **docker executor** to set up the build environment.\nThis executor provides a powerful abstraction that uses Docker Engine in the\nbackground to load our app and run the tests in a Docker container.\nIn addition, this Docker executor conveniently starts any dependent services (such as\ndatabases) before running jobs and links containers to communicate with each other.\n\n### Creating `.gitlab-ci.yml` file\n\nOur first task is to add our `.gitlab-ci.yml` file to the root directory of our project.\n\n```\nimage: node:4.2.2\n\nservices:\n  - postgres:9.5.0\n\nall_tests:\n  script:\n   - npm install\n   - node ./specs/start.js\n```\n\nNow let's go over the parts of this file.\n\nThe first line specifies the base image against which our tests will run.\nSince we are testing a NodeJS app, our base image will be a recent NodeJS version.\n\nThe `services` section is where external dependencies are listed.\nIn our tests, we need a PostgreSQL database, so we add the image name for this database.\nAny Docker image name can be specified here, such as `mysql` for MySQL databases.\nThe database will start with default credentials before tests run and it will be\naccessible under the host name `postgres` on the default PostgreSQL port, 5432.\n\nIf we needed to use any credentials other than the defaults, we could add them\ninside the `variables` tag. Values under this tag are passed to all services\non initialization. As per the PostgreSQL service [documentation](http://doc.gitlab.com/ce/ci/services/postgres.html),\nthe following settings will overwrite the user and password for our database:\n\n```\nimage: node:4.2.2\n\nvariables:\n  POSTGRES_USER: testuser\n  POSTGRES_PASSWORD: testpass\n\nservices:\n  - postgres:9.5.0\n\nall_tests:\n  script:\n   - npm install\n   - node ./specs/start.js\n```\n\nNote that since these test credentials are internal to our project, it is OK to simply\nadd them to the `.gitlab-ci.yml` file. However, you should register any external\nconfiguration variables, such as API keys, in the **Secure Variables**\npage (**Settings -> Variables**) and reference them here by name.\n\nIn the final section, we define a job named `all_tests`, which contains the command\nthat will run our tests.\nIn the `script` subsection here, we simply add our commands to install the dependencies\nand start the tests.\n\nTo be extra cautious, we could lint-check our yml file on the\n[GitLab CI Lint page](https://gitlab.com/ci/lint) to see the breakdown of\nthe build steps to make sure we don't have a typo.\n\n## How to use Shared Runners\n\nGitLab Inc. 
## How to use Shared Runners\n\nGitLab Inc. provides a number of servers with GitLab Runner installed.\nOn the **Runners** page (**Settings -> Runners**), we can see the list of currently available runners.\nWe should see that Shared Runners are already available for us, so we can immediately queue our first build by simply pushing our `.gitlab-ci.yml` file to our repository.\nWe can track the progress of our build on the [Builds page](https://gitlab.com/gitlab-examples/nodejs/builds).\nOnce our build starts, we should see it complete successfully in a couple of minutes.\n\n### How to install a specific runner\n\nWhile these Shared Runners are great for getting a sense of how CI works, we will now install GitLab Runner on a private server to run exclusively\nfor our project.\nWe will use exactly the same [open-source software](https://gitlab.com/gitlab-org/gitlab-runner)\nGitLab.com uses on its Shared Runners, so we will have the extra benefit of\noptimizing and securing our builds for our specific project.\n\nWe will need a server instance on which to install GitLab Runner.\nGitLab Runner can be installed on [Linux](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/install/linux-repository.md), [macOS](https://gitlab.com/gitlab-org/gitlab-ci-multi-runner/blob/master/docs/install/osx.md) and [Windows](https://gitlab.com/gitlab-org/gitlab-ci-multi-runner/blob/master/docs/install/windows.md).\nFor our sample project, a small server instance with 1 GB RAM should be enough.\nIn addition, since we will be running with the Docker executor, we also need to have [Docker Engine](https://docs.docker.com/engine/installation/) installed.\n\nUpon installation, we will register a new runner for our project.\nStart the registration with the following command:\n\n```\ngitlab-ci-multi-runner register\n```\n\nThe registration will walk us through a few steps to configure our registered runner.\n\n```\nRunning in system-mode.\n\nPlease enter the gitlab-ci coordinator URL (e.g. https://gitlab.com/ci):\nhttps://gitlab.com/ci\n```\n\nSince our project is hosted on GitLab.com, we will use the default\ngitlab-ci coordinator URL.\n\n```\nPlease enter the gitlab-ci token for this runner:\n[your private gitlab-ci token]\n```\n\nOn the **Runners** page (**Settings -> Runners**), we will copy the private gitlab-ci token for our project and paste it here.\n\n```\nPlease enter the gitlab-ci description for this runner:\n[ubuntu-2gb-nyc3-01]: new-docker-executor\nPlease enter the gitlab-ci tags for this runner (comma separated):\ndocker\nRegistering runner... succeeded                     runner=8tB1zBiU\n```\n\nIt is good practice to give runners a descriptive name and tags so that we can\nremember and target them later on. We will add the `docker` tag to this\nrunner since we can run any Docker image and services with it.\n\n```\nPlease enter the executor: virtualbox, ssh, shell, parallels, docker, docker-ssh:\ndocker\nPlease enter the default Docker image (eg. ruby:2.1):\nnode:4.2.2\nRunner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\n```\n\nNotice that there are several executor options available.\nIn this post, we are using the `docker` executor.\nRecall that in our `.gitlab-ci.yml` file the base image is already set to `node:4.2.2`.\nThe default Docker image specified here will be used only when the `.gitlab-ci.yml`\nfile does not contain an image declaration.\n\nSpecific runners take precedence over the Shared Runners.\nOur project won't be using Shared Runners as long as our specific runner is available.\nIf preferred, we can disable the Shared Runners on the **Runners** page\n(**Settings -> Runners**) by toggling off the Shared Runners button.\n\nFinally, we are ready to trigger a new build.\nWe should see the next build running with our specific runner on our private server.\n\n## How to enable caching\n\nNow that we have a functional CI workflow, let's talk about\nhow to make it faster and more efficient.\n\nTo start, we can eliminate redundant downloading of dependency libraries by\ncaching and restoring dependencies between builds.\nFor NodeJS projects, dependent libraries are installed in a folder called `node_modules`,\nso we specify this folder to be cached in our `.gitlab-ci.yml` file:\n\n```\nimage: node:4.2.2\n\ncache:\n  paths:\n  - node_modules/\n\nservices:\n  - postgres:9.5.0\n\nall_tests:\n  script:\n   - npm install\n   - node ./specs/start.js\n```\n\nAfter a successful build, you should see that the `node_modules` folder is archived at\nthe end of the build, to be restored at the beginning of following builds.\nAs long as our dependencies file remains unchanged, no new libraries will be\ndownloaded and our total build time will significantly decrease.\n\n### Adding concurrency\n\nAnother modification we can make to speed up the build process is to\nparallelize the tests.\nIn our project, we can split the tests into two jobs:\none for the database tests and another for the async module tests.\nFurthermore, we can restrict the PostgreSQL service to run only for our database\njob, since we won't need a database for the async module.\n\n```\nimage: node:4.2.2\n\ncache:\n  paths:\n  - node_modules/\n\ntest_async:\n  script:\n   - npm install\n   - node ./specs/start.js ./specs/async.spec.js\n\ntest_db:\n  services:\n    - postgres:9.5.0\n  script:\n   - npm install\n   - node ./specs/start.js ./specs/db-postgres.spec.js\n```\n\nNote that we still need to introduce concurrency to the build.\nTo do that, we could either create a new server and register a runner for\nour project, or increase the concurrency level for our existing runner.\nWe will go with the latter and edit the `concurrent` setting on the first\nline of our `config.toml` configuration file on the server.\nIf you installed GitLab Runner as the root user with the deb or rpm packages,\nthe config file will be in `/etc/gitlab-runner/config.toml` by default:\n\n```\nconcurrent = 2\n```\n\nAfter triggering a new build, we can see two jobs running\nat the same time.\n\n## Conclusion\n\nIn this tutorial, we set up automated testing with GitLab Runner and its built-in Docker executor for a NodeJS\nproject to get started with continuous integration on GitLab.com.\nFor more information on GitLab Runner and the GitLab CI platform, or on using Docker with them, check out the [documentation](http://doc.gitlab.com/ce/ci/).\nHappy coding!\n\n### About Guest Author: Ahmet Kizilay\n\n[Ahmet Kizilay](https://about.me/ahmetkizilay) is a software developer living in 
Istanbul.\nHe is currently working as a full-stack developer at [Graph Commons](https://graphcommons.com/).\n",{"slug":9271,"featured":6,"template":678},"gitlab-runner-with-docker","content:en-us:blog:gitlab-runner-with-docker.yml","Gitlab Runner With Docker","en-us/blog/gitlab-runner-with-docker.yml","en-us/blog/gitlab-runner-with-docker",{"_path":9277,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9278,"content":9283,"config":9287,"_id":9289,"_type":16,"title":9290,"_source":17,"_file":9291,"_stem":9292,"_extension":20},"/en-us/blog/making-gitlab-faster",{"title":9279,"description":9280,"ogTitle":9279,"ogDescription":9280,"noIndex":6,"ogImage":2478,"ogUrl":9281,"ogSiteName":692,"ogType":693,"canonicalUrls":9281,"schema":9282},"Making GitLab Faster","In GitLab 8.5 we shipped numerous performance improvements. In this article we'll take a look at some of these changes and the process involved in finding and resolving these issues.","https://about.gitlab.com/blog/making-gitlab-faster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Making GitLab Faster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yorick Peterse\"}],\n        \"datePublished\": \"2016-02-25\",\n      }",{"title":9279,"description":9280,"authors":9284,"heroImage":2478,"date":9285,"body":9286,"category":14},[6025],"2016-02-25","\nIn GitLab 8.5 we shipped numerous performance improvements. In this article\nwe'll take a look at some of these changes and the process involved in finding\nand resolving these issues. In particular, we'll look at the following merge\nrequests:\n\n* [Optimize fetching issues closed by a merge request][mr2625]\n* [Improve performance of retrieving last update times for events][mr2613]\n* [Only set autocrlf when creating/updating files][mr2859]\n\n\u003C!--more-->\n\n## Performance Monitoring & Tooling\n\nWithout a proper production performance monitoring system and a good set of\ntools, it's nearly impossible to find and resolve performance problems. GitLab\ncomes with two systems that make it possible to measure application performance:\n\n* [GitLab Performance Monitoring][monitoring]: a monitoring system using\n  [InfluxDB][influxdb] to track application performance of production\n  environments (though you can also use it during development). Data is then\n  visualized using [Grafana][grafana], though users can use any software capable\n  of extracting data from InfluxDB.\n* Sherlock: a development-only monitoring system. Due to its overhead, Sherlock\n  is not suitable for production environments. For example, Sherlock\n  uses [rblineprof][rblineprof] to track execution timings on a per-line basis,\n  but this adds quite a bit of overhead.\n\nAnother very useful library is [benchmark-ips][benchmark-ips]. This library can\nbe used to measure the performance of snippets of code while taking care of\nwarming up any caches, Just In Time compilers, etc. For more information, see the\n[benchmark-ips README][benchmark-ips-readme].\n\n
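To give a feel for the API, here is a minimal example of our own (not taken from the GitLab codebase) that compares two ways of building the same string:\n\n    require 'benchmark/ips'\n\n    Benchmark.ips do |bench|\n      bench.report('String#+') { 'foo' + 'bar' }\n      bench.report('format')   { format('%s%s', 'foo', 'bar') }\n\n      # Prints how the reports compare, e.g. which one is fastest and by how much.\n      bench.compare!\n    end\n\n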
### Limitations of Benchmarks\n\nWhile we're on the topic of benchmarks, it's worth mentioning that benchmarks are\nonly really useful for seeing the impact of a certain change. For example, if\nbenchmark X can run Y iterations in a certain time period, this gives you no\ninsight into how it will perform in a production environment; all it indicates\nis that it can run a certain number of iterations. However, when a certain\nchange results in the benchmark completing twice as fast, things start\ngetting interesting. While we still don't really know how the change will affect\nour production environment, we at least know that in the most ideal case\nperformance will be twice as fast.\n\nIn short, benchmarks alone aren't enough; you always have to measure (and _keep_\nmeasuring) the performance of code in a production environment. This may seem\nlike common knowledge, but a few too many projects out there make bold claims\nabout their performance based solely on a set of benchmarks.\n\nWith that out of the way, let's get started.\n\n## Optimize fetching issues closed by a merge request\n\nCommit messages can be used to automatically close issues by adding the text\n\"Fixes #X\" or \"Closes #X\" to a commit message (where X refers to an issue ID).\nIn turn, each merge request shows the list of issues that will be closed whenever\nthe merge request is merged. The description of a merge request can also include\ntext such as \"Fixes #X\" to close issues. In other words, the list of\nissues to close is a set composed of the issues to close as extracted from\nthe commit messages and the issues to close as extracted from the merge\nrequest's description.\n\nWhich brings us to the method `MergeRequest#closes_issues`. This method is used\nto return the list of issues to close (as an Array of `Issue` instances). If we\nlook at the performance of this method over time, we see the following:\n\n![MergeRequest#closes_issues Timings][mr2625-timings]\n\nThe small gap at the start of the graph is due to monitoring data only being\nretained for 30 days.\n\nTo summarize the timings:\n\n* A mean of around 500 milliseconds\n* A 95th percentile between 1 and 1.5 seconds\n* A 99th percentile between 1.5 and 2 seconds\n\nTwo seconds (in the worst case) to retrieve a list of issues to close is not\nacceptable, so it was clear there was some work to be done.\n\nPrior to 8.5, this method was implemented as follows:\n\n    def closes_issues(current_user = self.author)\n       if target_branch == project.default_branch\n         issues = commits.flat_map { |c| c.closes_issues(current_user) }\n         issues.push(*Gitlab::ClosingIssueExtractor.new(project, current_user).\n                    closed_by_message(description))\n         issues.uniq(&:id)\n       else\n         []\n       end\n    end\n\nWhen the target branch of a merge request equals the project's default branch,\nthis method takes the following steps:\n\n1. For every commit in the merge request, grab the issues that should be closed\n   when the merge request is merged.\n2. Append the list of issues to close based on the merge request's description\n   to the list of issues created in step 1.\n3. Remove any duplicate issues (based on the issue IDs) from the resulting list.\n\nWhat stood out here is the following line:\n\n    issues = commits.flat_map { |c| c.closes_issues(current_user) }\n\nFor every commit the method `Commit#closes_issues` would be called, which in\nturn was implemented as follows:\n\n    def closes_issues(current_user = self.committer)\n      Gitlab::ClosingIssueExtractor.new(project, current_user).closed_by_message(safe_message)\n    end\n\nFurther digging revealed that `Gitlab::ClosingIssueExtractor#closed_by_message`\nwould perform two steps:\n\n1. Extract the referenced issue IDs from a String\n2. 
Run a database query to return a list of corresponding `Issue` objects\n\nNote that the above steps would be performed for _every_ commit in a merge\nrequest, regardless of whether a commit actually referenced an issue or\nnot. As such, the more commits a merge request contained, the slower things\nwould get.\n\nIf we look at how `Gitlab::ClosingIssueExtractor#closed_by_message` is\nimplemented and used, we see that it operates on a single String and doesn't\nreally care what it contains or where it comes from, as long as it contains\nreferences to issue IDs:\n\n    def closed_by_message(message)\n      return [] if message.nil?\n\n      closing_statements = []\n      message.scan(ISSUE_CLOSING_REGEX) do\n        closing_statements \u003C\u003C Regexp.last_match[0]\n      end\n\n      @extractor.analyze(closing_statements.join(\" \"))\n\n      @extractor.issues\n    end\n\nThis got me thinking: what if we concatenate all commit messages together and\npass the resulting String to `Gitlab::ClosingIssueExtractor#closed_by_message`?\nDoing so would mean performance is no longer affected by the number of commits\nin a merge request.\n\nTo test this, I wrote a benchmark to compare the old setup with the idea I was\ngoing for:\n\n    require 'benchmark/ips'\n\n    project = Project.find_with_namespace('gitlab-org/gitlab-ce')\n    user    = User.find_by_username('yorickpeterse')\n    commits = ['Fixes #1', 'Fixes #2', 'Fixes #3']\n    desc    = 'This MR fixes #1 #2 #3'\n\n    Benchmark.ips do |bench|\n      # A somewhat simplified version of the old code (excluding any actual\n      # commit/merge request objects).\n      bench.report 'old' do\n        issues = commits.flat_map do |message|\n          Gitlab::ClosingIssueExtractor.new(project, user).\n            closed_by_message(message)\n        end\n\n        issues.push(*Gitlab::ClosingIssueExtractor.new(project, user).\n                   closed_by_message(desc))\n\n        issues.uniq(&:id)\n      end\n\n      # The new code\n      bench.report 'new' do\n        messages = commits + [desc]\n\n        Gitlab::ClosingIssueExtractor.new(project, user).\n          closed_by_message(messages.join(\"\\n\"))\n      end\n\n      bench.compare!\n    end\n\nWhen running this benchmark, we get the following output:\n\n    Calculating -------------------------------------\n                     old     1.000  i/100ms\n                     new     1.000  i/100ms\n    -------------------------------------------------\n                     old      1.377  (± 0.0%) i/s -      7.000\n                     new      2.807  (± 0.0%) i/s -     15.000  in   5.345900s\n\n    Comparison:\n                     new:        2.8 i/s\n                     old:        1.4 i/s - 2.04x slower\n\nIn this benchmark alone, the new code is around two times faster than the old\ncode. The actual number of iterations isn't very relevant; we just want to know\nwhether we're on the right track or not.\n\nRunning the test suite showed no tests were broken by these changes, so it was\ntime to set up a merge request and deploy this to GitLab.com (and of course\ninclude it in the next release, 8.5 in this case) to see the impact in a\nproduction environment. The merge request for this was [\"Optimize fetching\nissues closed by a merge request\"][mr2625]. 
These changes were deployed around\nthe 12th of February, and we can see the impact on GitLab.com in the following\ngraph:\n\n![MergeRequest#closes_issues Timings][mr2625-timings]\n\nThat's right, we went from timings between 0.5 and 2.5 seconds to timings of\nless than 15 milliseconds (method call timings below 15 milliseconds are not\ntracked). Ship it!\n\n## Improve performance of retrieving last update times for events\n\nFor certain activity feeds we provide Atom feeds that users can subscribe to.\nFor example, \u003Chttps://gitlab.com/yorickpeterse.atom> provides an Atom feed of\nmy public GitLab.com activity. The feed is built by querying a list of records\nfrom the database called \"events\". The SQL query is rather large, as the list of\nevents to return is based on the projects a user has access to (in the case of user\nactivity feeds). For example, for my own user profile the query would be as\nfollows:\n\n    SELECT events.*\n    FROM events\n    LEFT OUTER JOIN projects ON projects.id = events.project_id\n    LEFT OUTER JOIN namespaces ON namespaces.id = projects.namespace_id\n    WHERE events.author_id IS NOT NULL\n    AND events.author_id = 209240\n    AND (\n        projects.id IN (\n            SELECT projects.id\n            FROM projects\n            WHERE projects.id IN (\n                -- All projects directly owned by a user.\n                SELECT projects.id\n                FROM projects\n                INNER JOIN namespaces ON projects.namespace_id = namespaces.id\n                WHERE namespaces.owner_id = 209240\n                AND namespaces.type IS NULL\n\n                UNION\n\n                -- All projects of the groups a user is a member of\n                SELECT projects.id\n                FROM projects\n                INNER JOIN namespaces ON projects.namespace_id = namespaces.id\n                INNER JOIN members ON namespaces.id = members.source_id\n                WHERE namespaces.type IN ('Group')\n                AND members.type IN ('GroupMember')\n                AND members.source_type = 'Namespace'\n                AND members.user_id = 209240\n\n                UNION\n\n                -- All projects (that don't belong to one of the groups of a\n                -- user) a user is a member of\n                SELECT projects.id\n                FROM projects\n                INNER JOIN members ON projects.id = members.source_id\n                WHERE members.type IN ('ProjectMember')\n                AND members.source_type = 'Project'\n                AND members.user_id = 209240\n            )\n\n            UNION\n\n            -- All publicly available projects, regardless of whether we still\n            -- have access or not.\n            SELECT projects.id\n            FROM projects\n            WHERE projects.visibility_level IN (20, 10)\n        )\n    )\n    ORDER BY events.id DESC;\n\nThis particular query is quite the behemoth, but currently it is the easiest\nway of getting a list of events for projects a user has access to.\n\nOne of the bits of information provided by an Atom feed is a timestamp\nindicating the time the feed was updated. This timestamp was generated using the\nmethod `Event.latest_update_time`, which would take a collection of events and\nreturn the most recent update time. This method was implemented as follows:\n\n    def latest_update_time\n      row = select(:updated_at, :project_id).reorder(id: :desc).take\n\n      row ? 
row.updated_at : nil\n    end\n\nThis method is broken up into two steps:\n\n1. Order the collection in descending order and take the first record\n2. If there was a record, return the `updated_at` value, otherwise return `nil`\n\nThis method was then used as follows in the Atom feed (here `xml.updated`\nwould generate an `\u003Cupdated>` XML element):\n\n    xml.updated @events.latest_update_time.xmlschema if @events.any?\n\nPerformance of this method was less than stellar (the blue bars are the timings\nof `Event.latest_update_time`):\n\n![Event.latest_update_time Timings][mr2613-timings]\n\nIn this graph we can see the timings quite often hover around 10 seconds. That's\n10 seconds _just_ to get the latest update time from the database. Ouch!\n\nAt first I started messing around with using the SQL `max()` function instead of\na combination of `ORDER BY` and `LIMIT 1`. We had used this in the past, and I\nexplicitly removed it because it was performing worse at the time. Since quite a\nbit had changed since then, I figured it was worth re-investigating the use of this\nfunction. The process of looking into this, as well as my findings, can be found\nin issue [12415](https://gitlab.com/gitlab-org/gitlab-ce/issues/12415).\n\nA couple of days after I first started looking into this issue, I realized there\nwas a far easier solution to this problem. Since retrieving the list of events\nitself (without using the above code) is already quite fast and already\nsorted in the right order, we can simply re-use this list. That is, we'd take the\nfollowing steps:\n\n1. Query the list of events.\n2. Cast the list of events from an ActiveRecord query result to an Array (this\n   is done anyway later on, as we have to generate XML for every event).\n3. Take the `updated_at` value of the first event in this list, if present.\n\nThis led to the merge request\n[\"Improve performance of retrieving last update times for events\"][mr2613]. This\nmerge request also contains a few other changes so certain records aren't loaded\ninto memory when not needed, but the gist of it is that instead of this:\n\n    xml.updated @events.latest_update_time.xmlschema if @events.any?\n\nWe now use this:\n\n    xml.updated @events[0].updated_at.xmlschema if @events[0]\n\nAs a result, the method `Event.latest_update_time` was no longer needed\nand thus was removed. This in turn drastically reduced the loading times of all\nAtom feeds (not just user feeds).\n\n## Only set autocrlf when creating/updating files\n\nGit has an option called `core.autocrlf` that can be used to automatically\nconvert line endings in text files. This option can be set to one of three values:\n\n1. `true`: CRLF line endings are converted to LF when committing, and LF line\n   endings back to CRLF when checking files out\n2. `false`: no conversion takes place\n3. `input`: converts CRLF line endings to LF upon committing changes\n\nGitLab supports three ways of committing changes to a Git repository:\n\n1. Via a Git client\n2. Via the web editor\n3. Via the API\n\nIn the last two cases we want to make sure CRLF line endings are replaced with LF\nline endings. For example, browsers use CRLF even on non-Windows platforms. To\ntake care of this, our documentation recommends that users configure Git to set\n`core.autocrlf` to `input`; however, we still need to take care of this ourselves\nin case a user didn't configure Git to convert line endings by default. 
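\n\nConcretely, the client-side setting recommended by the documentation boils down to a single command:\n\n    git config --global core.autocrlf input\n\n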
In GitLab, this\nprocess took place in a method called `Repository#raw_repository`, which was\nimplemented as follows:\n\n    def raw_repository\n      return nil unless path_with_namespace\n\n      @raw_repository ||= begin\n        repo = Gitlab::Git::Repository.new(path_to_repo)\n        repo.autocrlf = :input\n        repo\n      rescue Gitlab::Git::Repository::NoRepository\n        nil\n      end\n    end\n\nThis particular method is called in quite a number of places and runs on almost\nevery (if not every) project-specific page (issues, milestones, the project\nhomepage, etc.). Performance of this method was, well, bad:\n\n![Gitlab::Git::Repository#autocrlf= Timings][mr2859-bars]\n\nThis particular graph plots the 95th percentile of the method\n`Gitlab::Git::Repository#autocrlf=`, which is used to set the `core.autocrlf`\noption. We can see that on average the 95th percentile hovers around 500\nmilliseconds. That's 500 milliseconds on almost every page to set a Git option\nthat's already set 99% of the time. More importantly, that's 500 milliseconds of\ntime wasted on many pages where no changes are ever written to a Git repository,\nmeaning this option is never even used there.\n\nIt's clear that we _don't_ want to run this on every page, especially when the\noption is not going to be used. However, we still have to make sure this option\nis set when we _do_ need it. At this point, my first thought was to compare the\noverhead of always writing this option with that of only writing it when actually\nneeded. In Ruby code this would roughly translate to:\n\n    repo = Gitlab::Git::Repository.new(path_to_repo)\n\n    # Only set autocrlf to :input if it's not already set to :input\n    repo.autocrlf = :input unless repo.autocrlf == :input\n\nThe idea was that when sharing a disk over the network (e.g. via an NFS server),\na read is probably much faster than a write. A write may also end up locking\nfiles for the duration, possibly blocking other read operations. To test this, I\nwrote a script that would perform said operation a number of times and write the\ntimings to InfluxDB. 
The script is as follows:\n\n    require 'rugged'\n    require 'thread'\n    require 'benchmark'\n    require 'influxdb'\n\n    Thread.abort_on_exception = true\n\n    path = '/var/opt/gitlab/git-data/repositories/yorickpeterse/cat-pictures.git'\n    key  = 'core.autocrlf'\n    read = true\n\n    influx_options = { udp: { host: 'HOST', port: PORT } }\n\n    threads = 10.times.map do\n      Thread.new do\n        client = InfluxDB::Client.new(influx_options)\n\n        while read\n          time = Benchmark.measure do\n            repo = Rugged::Repository.new(path)\n\n            repo.config[key] = 'input' unless repo.config[key] == 'input'\n          end\n\n          ms = time.real * 1000\n\n          client.write_point('rugged_config_cas', values: { duration: ms })\n\n          sleep 0.05\n        end\n      end\n    end\n\n    sleep(120)\n\n    read = false\n\n    threads.each(&:join)\n\n    Rugged::Repository.new(path).config[key] = 'input'\n\nHere, HOST and PORT were replaced with the hostname and port number of our\nInfluxDB server.\n\nRunning this script produced the following graph:\n\n![Timings for writing autocrlf when needed](https://about.gitlab.com/images/making_gitlab_faster/autocrlf_write_when_needed.png)\n\nNext, I modified this script to simply always write the autocrlf option, which\nproduced the following graph:\n\n![Timings for always writing autocrlf](https://about.gitlab.com/images/making_gitlab_faster/autocrlf_always_write.png)\n\nFinally, I modified the script to simply load the repository as-is, which produced\nthe following graph:\n\n![Timings for only reading](https://about.gitlab.com/images/making_gitlab_faster/autocrlf_read_only.png)\n\nIn all three cases there's no real difference in timings, leading me to\nbelieve there's no particular benefit to only writing the option\nwhen it's not already set to \"input\".\n\nI spent some more time trying out different things to see how they would impact\nperformance, but sadly didn't get much out of it. The details can be found in the\nvarious comments on [issue 13457](https://gitlab.com/gitlab-org/gitlab-ce/issues/13457).\n\nA day later, [Jacob Vosmaer][jacob] and I decided to double-check the idea of\nwriting only when needed by applying a small patch to GitLab.com. This patch\nmodified `Repository#raw_repository` so that the autocrlf option would only be\nwritten when needed, just like in the script above. We also made sure to measure the\ntimings of both reading and writing this option. After deploying this patch and\nwaiting for about half an hour to get enough data, the timings were as follows:\n\n![autocrlf reads vs writes](https://about.gitlab.com/images/making_gitlab_faster/autocrlf_reads_vs_writes.png)\n\nThis graph shows a nice drop in timings for writing the autocrlf option, sadly\nat the cost of an increase in timings for reading it. In other\nwords, this change didn't actually solve anything; it merely moved the\nproblem from writing the option to reading it.\n\nAfter discussing this with Jacob, he suggested it might be an even better idea to\nonly set this option where we actually need it, instead of checking (and\npotentially writing) it on every page that happens to use\n`Repository#raw_repository`. After all, the best way to speed code up is to\nremove it entirely (or at least as much as possible).\n\nThis led to the merge request\n[\"Only set autocrlf when creating/updating files\"][mr2859], which does exactly\nthat. 
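\n\nRoughly speaking, and as a sketch of the idea rather than the actual diff, the change amounts to no longer touching autocrlf in `Repository#raw_repository` at all, and instead setting it only from the code paths that write files:\n\n    def raw_repository\n      return nil unless path_with_namespace\n\n      # No more autocrlf write on every call.\n      @raw_repository ||= begin\n        Gitlab::Git::Repository.new(path_to_repo)\n      rescue Gitlab::Git::Repository::NoRepository\n        nil\n      end\n    end\n\n    # Hypothetical helper, invoked only from the file create/update code paths.\n    def prepare_autocrlf!\n      raw_repository.autocrlf = :input\n    end\n\n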
The impact of this change can be seen in the following graph:\n\n![Merge Request Timings Impact](https://about.gitlab.com/images/making_gitlab_faster/autocrlf_timings_impact.png)\n\nThis graph shows the 95th percentile, 99th percentile, and the mean per 30\nminutes. The drop around the 20th is after the above merge request was deployed\nto GitLab.com. The changes in this merge request resulted in the timings going\nfrom between 70 milliseconds and 2.1 seconds to less than 15 milliseconds.\n\n## Conclusion\n\nIn this article I only highlighted 3 merge requests that made it into 8.5.0. The\nfollowing performance related merge requests are also included in 8.5.0:\n\n* [First pass at deleting projects in the background](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2569)\n* [Background process note logic](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2631)\n* [Page project list on dashboard](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2689)\n* [Cache BroadcastMessage.current](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2633)\n* [Smarter flushing of branch statistics caches](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2769)\n* [Cache various Repository Git operations](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2752)\n* [Dedicated method for counting commits between refs](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2707)\n\nThese are just a few of the performance changes we've made over the past few\nmonths, and they certainly won't be the last as there's still a lot of work to\nbe done.\n\n[mr2625]: https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2625\n[mr2625-timings]: /images/making_gitlab_faster/merge_request_closes_issues.png\n[mr2613]: https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2613\n[mr2613-timings]: /images/making_gitlab_faster/event_latest_update_time.png\n[mr2859]: https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/2859\n[mr2859-bars]: /images/making_gitlab_faster/gitlab_git_repository_autocrlf_bars.png\n[monitoring]: http://doc.gitlab.com/ce/monitoring/performance/introduction.html\n[influxdb]: https://influxdata.com/time-series-platform/influxdb/\n[grafana]: http://grafana.org/\n[rblineprof]: https://github.com/peek/peek-rblineprof\n[benchmark-ips]: https://github.com/evanphx/benchmark-ips\n[benchmark-ips-readme]: https://github.com/evanphx/benchmark-ips/blob/master/README.md\n[jacob]: https://gitlab.com/jacobvosmaer\n",{"slug":9288,"featured":6,"template":678},"making-gitlab-faster","content:en-us:blog:making-gitlab-faster.yml","Making Gitlab Faster","en-us/blog/making-gitlab-faster.yml","en-us/blog/making-gitlab-faster",{"_path":9294,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9295,"content":9300,"config":9304,"_id":9306,"_type":16,"title":9307,"_source":17,"_file":9308,"_stem":9309,"_extension":20},"/en-us/blog/gitlab-dot-com-now-supports-an-alternate-git-plus-ssh-port",{"title":9296,"description":9297,"ogTitle":9296,"ogDescription":9297,"noIndex":6,"ogImage":9118,"ogUrl":9298,"ogSiteName":692,"ogType":693,"canonicalUrls":9298,"schema":9299},"GitLab.com now supports an alternate git+ssh port","You'd be happy to know that GitLab.com now runs an alternate git+ssh port (443) which you can use whenever you are in a place where port 22 is blocked.","https://about.gitlab.com/blog/gitlab-dot-com-now-supports-an-alternate-git-plus-ssh-port","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab.com 
now supports an alternate git+ssh port\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Achilleas Pipinellis\"}],\n        \"datePublished\": \"2016-02-18\",\n      }",{"title":9296,"description":9297,"authors":9301,"heroImage":9118,"date":9302,"body":9303,"category":14},[8273],"2016-02-18","\n\nHave you ever tried to push changes to GitLab and gotten the error\n“port 22: Connection refused”? The network you're connected to doesn't allow\nusing port 22 and you suddenly can't get your work done. I want to push and I\nwant to push now!\n\nYou'll be happy to know that GitLab.com now runs an alternate `git+ssh` port\n(443) which you can use whenever you are in a place where port 22 is blocked.\n\n\u003C!-- more -->\n\n## The problem\n\nIt's not uncommon for network traffic in some places to be monitored\nand heavily firewalled, with only ports 80 (HTTP) and 443 (HTTPS) allowed.\n\nBlocking the standard SSH port is a measure that network sysadmins\noccasionally [have to take](http://serverfault.com/a/25566).\n\n## The solution\n\nLuckily for users, there is more than one way to overcome this issue.\nOne can use a VPN, Tor or [sshuttle] to reroute their network traffic to\nbe able to use SSH.\n\nBut even then, [VPNs can be blocked][vpn-wiki], and these countermeasures\nrequire some knowledge to set up and use.\n\nThe common solution is to make the SSH daemon listen on a port that is highly\nunlikely to be firewalled, which is why many people prefer port 443. If you\nare in a position where even port 443 is blocked, you have more serious matters\nto be concerned about.\n\nThere are three potential ways to get around this problem in GitLab. The first\nis to run the SSH server on a different port than the default 22 and\n[configure GitLab] to use that port (no user interaction needed). The second is to run the\nSSH server on a different port and make no changes to GitLab, just instruct the\nusers to use that port in their `.ssh/config`.\n\nThere is a third option, which involves port forwarding and avoids changing the\nSSH port on the instance GitLab runs on. This gives you the option of having two\ndistinct usable SSH ports, and it is the case with GitLab.com.\n\n## How GitLab.com implements an alternate SSH port\n\nOur current infrastructure setup goes something like this:\n\n> **GitLab.com > Azure availability set > Loadbalancer (443->443, 80->80, 22->22) > HAProxy nodes -> workers**\n\nNormally, you can't simply use port 443 on the same GitLab instance because\nit serves GitLab itself, and that's assuming you are running GitLab with HTTPS\n(if not, you are highly encouraged to do so). In that case, you are better off\nusing a separate host which forwards port 443 to port 22 of your GitLab instance.\nYou can do this with HAProxy or any other loadbalancer, or even with iptables.\n\n
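For the iptables route, a rough sketch of the forwarding rules on such a host might look like this (10.0.0.5 stands in for your GitLab instance's address; adapt it to your own network):\n\n```\n# Enable forwarding, then DNAT incoming TCP 443 to port 22 on the GitLab host.\nsysctl -w net.ipv4.ip_forward=1\niptables -t nat -A PREROUTING -p tcp --dport 443 -j DNAT --to-destination 10.0.0.5:22\niptables -t nat -A POSTROUTING -p tcp -d 10.0.0.5 --dport 22 -j MASQUERADE\n```\n\n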
In GitLab.com's case, we have set up a separate Azure availability set with two\nHAProxy nodes configured exactly the same as the GitLab.com ones. The only thing\nthat differs is the creation of a different Azure loadbalancer in that\navailability set which forwards TCP connections from port 443 to port 22.\n\nSo the new extra setup goes something like this:\n\n> **altssh.gitlab.com > Azure availability set > Loadbalancer (443->22) > HAProxy nodes (lb10,lb11) > workers**\n\n## How to use the alternate SSH connection on GitLab.com\n\nGitLab.com runs a second SSH server that listens on the commonly used port `443`,\nwhich is unlikely to be firewalled.\n\nAll you have to do is edit your `~/.ssh/config` and change the way you\nconnect to GitLab.com. The two notable changes are `Hostname` and `Port`:\n\n```\nHost gitlab.com\n  Hostname altssh.gitlab.com\n  User git\n  Port 443\n  PreferredAuthentications publickey\n  IdentityFile ~/.ssh/gitlab\n```\n\nThe first time you push to `altssh.gitlab.com` you will be asked to verify\nthe server's key fingerprint:\n\n```\nThe authenticity of host '[altssh.gitlab.com]:443 ([104.208.154.249]:443)' can't be established.\nECDSA key fingerprint is SHA256:HbW3g8zUjNSksFbqTiUWPWg2Bq1x8xdGUrliXFzSnUw.\nAre you sure you want to continue connecting (yes/no)?\n```\n\nThat's expected, since you are connecting to the new loadbalancer. If you\nlook closely, the key fingerprint is\n[the same as in GitLab.com](/pricing/).\n\n",{"slug":9305,"featured":6,"template":678},"gitlab-dot-com-now-supports-an-alternate-git-plus-ssh-port","content:en-us:blog:gitlab-dot-com-now-supports-an-alternate-git-plus-ssh-port.yml","Gitlab Dot Com Now Supports An Alternate Git Plus Ssh Port","en-us/blog/gitlab-dot-com-now-supports-an-alternate-git-plus-ssh-port.yml","en-us/blog/gitlab-dot-com-now-supports-an-alternate-git-plus-ssh-port",{"_path":9311,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9312,"content":9317,"config":9321,"_id":9323,"_type":16,"title":9324,"_source":17,"_file":9325,"_stem":9326,"_extension":20},"/en-us/blog/feature-highlight-create-files-and-directories-from-files-page",{"title":9313,"description":9314,"ogTitle":9313,"ogDescription":9314,"noIndex":6,"ogImage":2478,"ogUrl":9315,"ogSiteName":692,"ogType":693,"canonicalUrls":9315,"schema":9316},"Feature Highlight: Create files and directories from the Files page","In this feature highlight we look at how you can create a new file, directory, branch or tag from the file browser.","https://about.gitlab.com/blog/feature-highlight-create-files-and-directories-from-files-page","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Feature Highlight: Create files and directories from the Files page\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Drew Blessing\"}],\n        \"datePublished\": \"2016-02-10\",\n      }",{"title":9313,"description":9314,"authors":9318,"heroImage":2478,"date":9319,"body":9320,"category":14},[8978],"2016-02-10","\n\nSometimes it's easier to make quick changes directly from the GitLab interface\nthan to clone the project and use the Git command-line tool. In this feature\nhighlight, we look at how you can create a new file, directory, branch or\ntag from the file browser. All of these actions are available from a single\ndropdown menu.\n\n\u003C!-- more -->\n\n## Create a file\n\nFrom a project's files page, click the '+' button to the right of the branch selector.\nChoose 'New file' from the dropdown.\n\n![New file dropdown menu](https://about.gitlab.com/images/create_files/new_file_dropdown.png)\n\nEnter a file name in the 'File name' box. 
Then, add file content in the editor\narea. Add a descriptive commit message and choose a branch. The branch field\nwill default to the branch you were viewing in the file browser. If you enter\na new branch name, a checkbox will appear allowing you to start a new merge\nrequest after you commit the changes.\n\nWhen you are satisfied with your new file, click 'Commit Changes' at the bottom.\n\n![Create file editor](https://about.gitlab.com/images/create_files/new_file_editor.png)\n\n## Upload a file\n\nThe ability to create a file is great when the content is text. However, this\ndoesn't work well for binary data such as images, PDFs or other file types. In\nthis case you need to upload a file.\n\nFrom a project's files page, click the '+' button to the right of the branch\nselector. Choose 'Upload file' from the dropdown.\n\n![Upload file dropdown menu](https://about.gitlab.com/images/create_files/upload_file_dropdown.png)\n\nOnce the upload dialog pops up there are two ways to upload your file. Either\ndrag and drop a file on the pop up or use the 'click to upload' link. A file\npreview will appear once you have selected a file to upload.\n\nEnter a commit message, choose a branch, and click 'Upload file' when you are\nready.\n\n![Upload file dialog](https://about.gitlab.com/images/create_files/upload_file_dialog.png)\n\n## Create a directory\n\nTo keep files in the repository organized it is often helpful to create a new\ndirectory.\n\nFrom a project's files page, click the '+' button to the right of the branch selector.\nChoose 'New directory' from the dropdown.\n\n![New directory dropdown](https://about.gitlab.com/images/create_files/new_directory_dropdown.png)\n\nIn the new directory dialog enter a directory name, a commit message and choose\nthe target branch. Click 'Create directory' to finish.\n\n![New directory dialog](https://about.gitlab.com/images/create_files/new_directory_dialog.png)\n\n## Tip\n\nWhen creating or uploading a new file, or creating a new directory, you can\ntrigger a new merge request rather than committing directly to master. Enter\na new branch name in the 'Target branch' field. You will notice a checkbox\nappear that is labeled 'Start a new merge request with these changes'. After\nyou commit the changes you will be taken to a new merge request form.\n\n![Start a new merge request with these changes](https://about.gitlab.com/images/create_files/start_new_merge_request.png)\n\n## Create a new branch\n\nIf you want to make changes to several files before creating a new merge\nrequest, you can create a new branch up front. From a project's files page,\nchoose 'New branch' from the dropdown.\n\n![New branch dropdown](https://about.gitlab.com/images/create_files/new_branch_dropdown.png)\n\nEnter a new 'Branch name'. Optionally, change the 'Create from' field\nto choose which branch, tag or commit SHA this new branch will originate from.\nThis field will autocomplete if you start typing an existing branch or tag.\nClick 'Create branch' and you will be returned to the file browser on this new\nbranch.\n\n![New branch page](https://about.gitlab.com/images/create_files/new_branch_page.png)\n\nYou can now make changes to any files, as needed. 
When you're ready to merge\nthe changes back to master, you can use the widget at the top of the screen.\nThis widget only appears for a period of time after you create the branch or\nmodify files.\n\n![New push widget](https://about.gitlab.com/images/create_files/new_push_widget.png)\n\n## Create a new tag\n\nTags are useful for marking major milestones such as production releases,\nrelease candidates, and more. You can create a tag from a branch or a commit\nSHA. From a project's files page, choose 'New tag' from the dropdown.\n\n![New tag dropdown](https://about.gitlab.com/images/create_files/new_tag_dropdown.png)\n\nGive the tag a name such as `v1.0.0`. Choose the branch or SHA from which you\nwould like to create this new tag. You can optionally add a message and\nrelease notes. The release notes section supports Markdown format and you can\nalso upload an attachment. Click 'Create tag' and you will be taken to the tag\nlist page.\n\n![New tag page](https://about.gitlab.com/images/create_files/new_tag_page.png)\n",{"slug":9322,"featured":6,"template":678},"feature-highlight-create-files-and-directories-from-files-page","content:en-us:blog:feature-highlight-create-files-and-directories-from-files-page.yml","Feature Highlight Create Files And Directories From Files Page","en-us/blog/feature-highlight-create-files-and-directories-from-files-page.yml","en-us/blog/feature-highlight-create-files-and-directories-from-files-page",{"_path":9328,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9329,"content":9335,"config":9339,"_id":9341,"_type":16,"title":9342,"_source":17,"_file":9343,"_stem":9344,"_extension":20},"/en-us/blog/getting-started-with-gitlab-and-gitlab-ci",{"title":9330,"description":9331,"ogTitle":9330,"ogDescription":9331,"noIndex":6,"ogImage":9332,"ogUrl":9333,"ogSiteName":692,"ogType":693,"canonicalUrls":9333,"schema":9334},"Getting started with GitLab and GitLab CI","This is the start of a series of posts to get you started with GitLab and GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684532/Blog/Hero%20Images/stairwaycompressed.jpg","https://about.gitlab.com/blog/getting-started-with-gitlab-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Achilleas Pipinellis\"}],\n        \"datePublished\": \"2015-12-14\",\n      }",{"title":9330,"description":9331,"authors":9336,"heroImage":9332,"date":9337,"body":9338,"category":14},[8273],"2015-12-14","\n\nThis is the start of a series of posts to get you started with GitLab and\nGitLab CI.\n\nIn this first post, we will explain what CI is, why you would use it, and we will\nbriefly explore a high-level overview of the components that make GitLab and GitLab\nCI work together.\n\nLet's dive in!\n\n\u003C!-- more -->\n\n## What is this CI thing?\n\nCI stands for [Continuous Integration][ci-wiki] and has gained in popularity\nover the last few years. Together with [Continuous Delivery][cd-wiki] (CD), they\nform the backbone of modern [agile software development](/topics/agile-delivery/).\n\nMartin Fowler described this approach in [his article][ci-fowler] as:\n\n> Continuous Integration is a software development practice where members of a\n> team integrate their work frequently, usually each person integrates at least\n> daily - leading to multiple integrations per day. 
Each integration is\n> verified by an automated build (including test) to detect integration errors\n> as quickly as possible.\n\nThe [benefits of Continuous Integration](/topics/ci-cd/benefits-continuous-integration/) are huge when automation plays an\nintegral part in your workflow.\n\nThere are many applications in the field that try to tackle this practice. The\nmajority of them are either closed source, making you rely on external sources\n(meaning a single point of failure), or need a lot of configuration just to set\nup, let alone the millions of plugins you have to install to meet your needs.\n\nWould you rather have a CI service tightly integrated with your favorite code\nmanagement tool with next to zero configuration? If so, you will love GitLab\nCI!\n\n## Meet GitLab CI\n\nWhat started as a side project three years ago has now\nbecome one of GitLab's key features. Back then, it was a separate application\nthat talked to GitLab via webhooks. Now, starting from GitLab 8.0,\nGitLab CI has been [fully integrated with GitLab itself][8-post]. No more need\nto set up and maintain another application, which means less work for you or\nyour Ops team.\n\nGitLab CI is enabled by default on new projects, so you can start using its\nfeatures right away. All you need is a file called\n[.gitlab-ci.yml](/blog/implementing-gitlab-ci-dot-yml/) (where you\ndescribe how the build should run) placed in the root directory of your Git\nproject, and a configured Runner to perform the actual build.\n\n
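As a taste of what that file looks like, here is a deliberately minimal, hypothetical example with a single job (the test command is a placeholder for whatever runs your test suite):\n\n```\ntest:\n  script:\n    - ./run_tests.sh\n```\n\n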
Each project comes with a Builds page where you can follow the output of each\nbuild, see the commit that introduced it, and other useful information such as\nthe time the build started, how long it lasted and the committer's name.\n\nThe statuses for each build are exposed in the GitLab UI, and you can see\nwhether a build succeeded, failed, got canceled or skipped, within a single\nMerge Request or commit, or on the Merge Requests and commits pages.\n\nAnother win of having GitLab CI integrated with GitLab is that people can see\nother users' CI/CD approaches and learn from them.\n\nThere is one last component without which most of the features above wouldn't\nhave been possible. It does all the heavy work by performing the actual builds.\n\nEnter GitLab Runner.\n\n## GitLab Runner\n\nGitLab Runner is the missing piece that leverages the power of GitLab CI. A\nRunner is responsible for the actual build and can be configured to be used\nwith one or many projects. It talks to the GitLab CI API, reads `.gitlab-ci.yml`,\nfollows the steps defined in that file and sends the results to GitLab CI.\n\nSome of the key features of GitLab Runner are:\n\n* [Is open source][runner-repo]\n* Is written in Go and distributed as a single binary without any other\n  requirements\n* Works on Linux, macOS, *BSD and Windows (and anywhere you can run Docker)\n* Easy installation as a service for Linux, macOS and Windows\n* [Linux users can install it using deb or rpm packages][runner-linux-repo]\n* Easy-to-use setup with support for Docker, Docker with SSH, Parallels or\n  plain SSH running environments\n* Allows you to:\n  - run multiple jobs concurrently\n  - use multiple tokens with multiple servers (even per project)\n  - limit the number of concurrent jobs per token\n* Jobs can be run:\n  - locally using your shell\n  - using Docker containers\n  - using Docker containers and executing jobs over SSH\n  - by connecting to a remote SSH server\n* Supports Bash, Windows Batch and Windows PowerShell\n* Allows you to customize the running environment per job\n* Automatic configuration reload without restart\n* Enables caching of Docker containers\n* A list of files and directories can be attached to a build after success\n  (the so-called `artifacts` feature)\n\nAs you can see, there is much flexibility in installing and configuring\na GitLab Runner.\n\n## What the future holds\n\nWe will keep pushing forward to make GitLab CI a breeze to use, and to extend it\nso it adapts to present and future technologies in the CI field.\n\nSome notable features that we would like to see happen are listed in the\nissues below:\n\n- [Pass CI build artifacts between stages][issue-3423]\n- [GitLab container registry][issue-3299]\n- [GitLab Pipeline][issue-3743]\n- [GitLab Deploy][issue-3286]\n\nAs always, you can visit our [direction page](/direction/) and get a taste of\nthe forthcoming features.\n\n## What's next?
\n\nBy now you should have an overview of what GitLab CI is and why you should\nstart using it.\n\nIf you are eager to give it a go, be sure to visit our [quick start guide][].\n\nIn the following posts, we will explore how to use GitLab CI to test your\nprojects in specific languages such as PHP.\n\nSubscribe to our newsletter, which includes links to our latest blog posts.\n\n[ci-wiki]: https://en.wikipedia.org/wiki/Continuous_integration\n[cd-wiki]: https://en.wikipedia.org/wiki/Continuous_delivery\n[ci-fowler]: http://www.martinfowler.com/articles/continuousIntegration.html\n[8-post]: /releases/2015/09/22/gitlab-8-0-released/\n[ci-page]: /solutions/continuous-integration/\n[issue-3423]: https://gitlab.com/gitlab-org/gitlab-ce/issues/3423\n[issue-3299]: https://gitlab.com/gitlab-org/gitlab-ce/issues/3299\n[issue-3743]: https://gitlab.com/gitlab-org/gitlab-ce/issues/3743\n[issue-3286]: https://gitlab.com/gitlab-org/gitlab-ce/issues/3286\n[quick start guide]: http://doc.gitlab.com/ce/ci/quick_start/\n[runner-repo]: https://gitlab.com/gitlab-org/gitlab-runner\n[runner-linux-repo]: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/install/linux-repository.md\n",{"slug":9340,"featured":6,"template":678},"getting-started-with-gitlab-and-gitlab-ci","content:en-us:blog:getting-started-with-gitlab-and-gitlab-ci.yml","Getting Started With Gitlab And Gitlab Ci","en-us/blog/getting-started-with-gitlab-and-gitlab-ci.yml","en-us/blog/getting-started-with-gitlab-and-gitlab-ci",{"_path":9346,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9347,"content":9352,"config":9357,"_id":9359,"_type":16,"title":9360,"_source":17,"_file":9361,"_stem":9362,"_extension":20},"/en-us/blog/quayio",{"title":9348,"description":9349,"ogTitle":9348,"ogDescription":9349,"noIndex":6,"ogImage":2478,"ogUrl":9350,"ogSiteName":692,"ogType":693,"canonicalUrls":9350,"schema":9351},"Integrate GitLab-hosted Docker projects with Quay.io","Continuous Integration of GitLab-Hosted Docker Projects with Quay.io","https://about.gitlab.com/blog/quayio","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Continuous Integration of GitLab-Hosted Docker Projects with Quay.io\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joseph Schorr from CoreOS\"}],\n        \"datePublished\": \"2015-08-19\",\n      }",{"title":9349,"description":9349,"authors":9353,"heroImage":2478,"date":9355,"body":9356,"category":14},[9354],"Joseph Schorr from CoreOS","2015-08-19","\n_This is a guest post by CoreOS._\n\nThe proliferation of containerization via systems such as Docker and [rkt](https://github.com/coreos/rkt) has introduced many benefits for application developers worldwide. However, this trend towards running applications in containers has also introduced hurdles when trying to ensure continuous integration of applications. Developers who depend upon continuous integration are faced with a new problem: how to ensure they always have a fully up-to-date container image of their source code, built every time they push to source control and available in their container registry immediately.\n\nTo help developers be more efficient working in teams and ensure they are developing on the most up-to-date version of their container images, we at [Quay.io](https://quay.io/) developed a continuous building pipeline.\nQuay.io transforms [source code found in GitLab](/solutions/source-code-management/) and other SCMs into Docker container images on every push. 
Quay.io, delivered by CoreOS, is a secure and easy way to build, manage, store and serve Docker container images.\n\nBy bringing Quay.io together with GitLab, users are able to develop more easily and quickly thanks to seamless syncing of their code to container images, helping them identify problems more quickly and easily test their updated container images in response to changes.\n\n\u003C!-- more -->\n\n## How to set up a GitLab project with a Dockerfile\n\nGetting started with GitLab and Quay.io can be done in a few simple steps.\n\n### Sign in to GitLab\n\nFirst, [sign in to GitLab](https://gitlab.com/users/sign_in) with your account.\nQuay.io will request access to your GitLab repository later on in this process.\n\n### Sign in to Quay.io\n\n[Sign in to Quay.io](https://quay.io/signin) with either a username and password, or GitHub/Google.\nIf you do not yet have a Quay.io account, an account with a 30-day free trial can be created here: [https://quay.io/plans](https://quay.io/plans).\n\n### Use an existing container repository in Quay.io\n\nIf you have an existing container repository in Quay.io, click on the repository you’d like to automatically build, and click the builds tab found on the left-hand side of the repository screen:\n\n![Quay.io build tab](https://about.gitlab.com/images/quayio/build-tab.png)\n\nFrom here, scroll to the bottom of the tab and click the “Create Build Trigger” button.\nYou will be presented with a menu showing the various source control management systems supported by Quay.io.\nChoose **GitLab Repository Push**:\n\n![Creating a Quay.io build trigger](https://about.gitlab.com/images/quayio/create-trigger.png)\n\n### Create a new container repository in Quay.io\n\nIf you do not yet have a container repository on Quay.io, follow these instructions to create a new repository.\n\nClick the **+** icon next to your username in the upper right-hand corner of Quay.io, and click [**New Repository**](https://quay.io/new/):\n\n![](https://about.gitlab.com/images/quayio/new-repo.png)\n\nYou will be asked for a name for the new repository, whether to make it public or private, and how to initialize the repository.\n\nChoose **Link to a GitLab Repository Push** for the Initialize Repository option:\n\n![](https://about.gitlab.com/images/quayio/initialize-repo.png)\n\n### Allow Quay.io access to GitLab\n\nOnce the above process has completed, you will be redirected to GitLab to begin the build trigger setup process:\n\n![](https://about.gitlab.com/images/quayio/oauth.png)\n\nClick **Authorize** to grant Quay.io access to your GitLab repositories.\n\n### Select the GitLab repository to build\n\nAfter authorization, GitLab will redirect you back to Quay.io, which (after a few moments) will display the trigger setup dialog:\n\n![Quay.io trigger dialog](https://about.gitlab.com/images/quayio/dialog1.png)\n\nChoose a GitLab repository to build (or enter the repository in the repository box) and click **Next**.\n\n### Finish GitLab trigger setup\n\nYou will be asked for other information pertaining to the build, such as the subdirectory that contains the Dockerfile (usually this is just “/”), what branches/tags to build (by default: everything) and whether to use a [Robot Account](http://docs.quay.io/glossary/robot-accounts.html) as pull credentials (only required if you have a private base image on Quay.io).\n\nOnce this information is entered, the build trigger will be set up on GitLab:\n\n![Final Quay.io trigger dialog](https://about.gitlab.com/images/quayio/dialog2.png)\n\nFrom 
\n\nOnce this information is entered, the build trigger will be set up on GitLab:\n\n![Final Quay.io trigger dialog](https://about.gitlab.com/images/quayio/dialog2.png)\n\nFrom this point forward, any pushes to your GitLab repository will immediately start a build on Quay.io of the pushed commit.\n\nIf you want to view a build without a GitLab push, you can click **Run Trigger Now** to have Quay.io pull the latest commit from GitLab and begin a build to populate your container repository.\n\n### Monitor GitLab builds on Quay.io\n\nAll builds triggered from GitLab can be found under the builds tab in the repository page.\nEach build shows its status, the commit information and links to the commit itself on GitLab:\n\n![Active build on a Quay.io build tab](https://about.gitlab.com/images/quayio/build.png)\n\nClicking on the build ID will take you to a page to view the logs of the build in real-time:\n\n![Build logs on Quay.io](https://about.gitlab.com/images/quayio/build-logs.png)\n\nThe logs for each step can be displayed by clicking on the **>** arrow next to the step to expand it.\n\n### GitLab + Quay.io = 👍\n\nAs we’ve seen, setting up a continuous integration pipeline from GitLab to Quay.io can be done in a few short and easy steps.\nThe combination of these two powerful services allows developers to be confident that when source code is updated, there will be a fully updated Docker container available for testing and usage within a few short minutes.\n\nTo get started with Quay.io, [sign up for a free 30-day trial](https://quay.io/?utm_source=GitLab&utm_medium=Blog&utm_campaign=GitLab) or [contact us](https://quay.io/contact/) if you have any questions.\n\nWant to build GitLab repositories behind your firewall? We’ve got you covered with our on-premises [Quay.io Enterprise Registry](https://coreos.com/products/enterprise-registry/).\n",{"slug":9358,"featured":6,"template":678},"quayio","content:en-us:blog:quayio.yml","Quayio","en-us/blog/quayio.yml","en-us/blog/quayio",{"_path":9364,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9365,"content":9370,"config":9375,"_id":9377,"_type":16,"title":9378,"_source":17,"_file":9379,"_stem":9380,"_extension":20},"/en-us/blog/feature-highlight-merge-request-approvals",{"title":9366,"description":9367,"ogTitle":9366,"ogDescription":9367,"noIndex":6,"ogImage":2478,"ogUrl":9368,"ogSiteName":692,"ogType":693,"canonicalUrls":9368,"schema":9369},"Feature Highlight: Merge Request Approvals","If you want to keep code quality high, it is important that you use a code review process. In GitLab, the best way to do this is by using Merge Requests. Read how to approve merge requests here.","https://about.gitlab.com/blog/feature-highlight-merge-request-approvals","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Feature Highlight: Merge Request Approvals\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Job van der Voort\"}],\n        \"datePublished\": \"2015-07-29\",\n      }",{"title":9366,"description":9367,"authors":9371,"heroImage":2478,"date":9373,"body":9374,"category":14},[9372],"Job van der Voort","2015-07-29","\n\nIf you want to keep code quality high, it is important that you use a code review\nprocess.
In GitLab, the best way to do this is by using Merge Requests.\n\nWe created merge requests so that only a person with the required\npermission (developer or higher) can merge code into the target branch.\nIf you want more people to review code before it's merged, you can now do this\nwith Merge Request Approvals in [GitLab Enterprise Edition].\n\n_Note: this is a follow-up to our [previous feature highlight on approvals],\nsince we've added additional functionality in GitLab 7.13._\n\n\u003C!-- more -->\n\n## How Approvals work\n\nApprovals will block the merging of a merge request until the configured number\nof approvals has been met. This allows you to require that a certain number of people\ncheck all the code that goes into important branches in your repository.\n\nYou can set the number of required approvals and you can assign specific approvers\nwho need to approve the merge request. If you set specific approvers, only\nthey will be able to approve the merge request. If you do not, anyone with\ndeveloper permission or higher will be able to approve the merge request.\n\n![Approvers in a Merge Request](https://about.gitlab.com/images/7_13/approvers_mr.png)\n\n### Assigning Approvers\n\nIt's possible to use a combination of specific and non-specific approvers,\nfor instance by setting the required number of approvers to `3` and only\n`Jane` as an approver.\n\nYou can even set a higher number of approvers than required approvals, in which\ncase only a subset of the approvers needs to approve the merge request.\nWith one required approval and `Jane` and `John` set as approvers, either\n`Jane` or `John` needs to approve the merge request.\n\n### Default Approvers\n\nYou can choose the approvers on merge request creation, but a default can be\nset in the project settings. This saves you from having to change the project\nsettings every time an important code reviewer is unavailable.\n\n### Automatically Resetting Approvals\n\nIf you want to have all your approvals reset after a new push is made to the\nmerge request, you can configure this. This means that after each push that is\nmade, any previously given approvals are reset.\n\nIf this setting is turned off, approvals will persist, independent of pushes\nto the merge request.\n\n## Getting started with Approvals\n\nTo start using Approvals, visit the settings of your project and set the\nrequired number of approvers to a value of your choosing, higher than 1.\n\n![Setting default suggested approvers for a project](https://about.gitlab.com/images/7_13/approvers_settings.png)\n\nHere you are also able to set the default approvers, and whether you want to\nreset the approvals on each push to the merge request.\n\n## Future\n\nWe created Merge Request Approvals at the request of our customers.
Our goal\nis to add great features to GitLab that benefit everyone who uses them and\ndon't inconvenience anyone who doesn't.\n\nWe're thinking about more improvements to Merge Request Approvals, the main\nimprovement being automatic reviewer suggestions, based on the history of\nthe changed files in the merge request.\nFor instance, if Jane worked a lot on a certain class and you submit a change\nto that class, Jane will be suggested as an approver for your merge request.\n\nWe're interested in hearing what you think about this feature and how we can\nfurther improve it.\n\n## Documentation\n\nFind our [documentation on Merge Request Approvals].\n\n[GitLab Enterprise Edition]: /pricing/\n[previous feature highlight on approvals]: /2015/06/16/feature-highlight-approve-merge-request/\n[documentation on Merge Request Approvals]: https://docs.gitlab.com/ee/user/project/merge_requests/approvals/\n",{"slug":9376,"featured":6,"template":678},"feature-highlight-merge-request-approvals","content:en-us:blog:feature-highlight-merge-request-approvals.yml","Feature Highlight Merge Request Approvals","en-us/blog/feature-highlight-merge-request-approvals.yml","en-us/blog/feature-highlight-merge-request-approvals",{"_path":9382,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9383,"content":9388,"config":9392,"_id":9394,"_type":16,"title":9395,"_source":17,"_file":9396,"_stem":9397,"_extension":20},"/en-us/blog/implementing-gitlab-ci-dot-yml",{"title":9384,"description":9385,"ogTitle":9384,"ogDescription":9385,"noIndex":6,"ogImage":2478,"ogUrl":9386,"ogSiteName":692,"ogType":693,"canonicalUrls":9386,"schema":9387},"Implementing .gitlab-ci.yml","We're replacing GitLab CI jobs with a .gitlab-ci.yml file and want to share the details of that process with you; we'd love to hear what you think.","https://about.gitlab.com/blog/implementing-gitlab-ci-dot-yml","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Implementing .gitlab-ci.yml\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Job van der Voort\"}],\n        \"datePublished\": \"2015-06-08\",\n      }",{"title":9384,"description":9385,"authors":9389,"heroImage":2478,"date":9390,"body":9391,"category":14},[9372],"2015-06-08","\n\nWe [wrote] about why we're replacing GitLab CI jobs with a `.gitlab-ci.yml` file.\nAs we've started on implementing this large change, we wanted to share the details\nof that process with you and would love to hear what you think.\n\n\u003C!-- more -->\n\nTo recap the [previous article]:\ncurrently you are required to write out your CI jobs in GitLab CI's interface.\nWe're replacing this with a single file, `.gitlab-ci.yml`, which you place in the root\nof your repository.\n\n## Schema change\n\nCurrently, on a push to GitLab, GitLab sends a web-hook to the CI Coordinator.\nThe coordinator creates a build based on the jobs that are defined in its UI,\nwhich can then be executed by the connected Runners.\n\nIn the new schema, GitLab sends the web-hook _and the `.gitlab-ci.yml`_ contents\nto the CI Coordinator, which creates builds based on the yml file. In turn,\nthese builds are executed by the Runners as before.\n\n## Migrating to the new style\n\nKeeping two different ways of doing things would be a strain on development and\nsupport, not to mention confusing. So we're not just deprecating the old style\nof defining jobs, we're removing it entirely and will migrate existing jobs.\n\nUpon upgrading, your existing jobs defined in the GitLab CI Coordinator will be\nconverted into a YAML file with the new syntax. You can download this file at any\ntime from the project settings.\n\nWhen the GitLab webhook triggers and doesn't transmit the content from `.gitlab-ci.yml`,\nthe coordinator will use the converted YAML file instead.\n\nThis makes migrating to the new style very easy. You can start by simply copy-pasting\nthe contents of the converted YAML file to the root of your repository. Existing projects\nwill continue to build successfully, yet new projects do not have the option to\nuse anything else.
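\n\nIf you want to sanity-check the converted file before committing it (our suggestion here, not part of the migration tooling), any YAML parser will do, for example Ruby's:\n\n```bash\n# Hedged suggestion, not part of the migration tooling: parse the converted\n# file to catch YAML syntax errors before committing it.\nruby -ryaml -e \"puts YAML.load_file('.gitlab-ci.yml').inspect\"\ngit add .gitlab-ci.yml\ngit commit -m \"Add converted .gitlab-ci.yml\"\n```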
\n\n## An example `.gitlab-ci.yml`\n\nTo get an idea of how the `.gitlab-ci.yml` will look, we've prepared an example\nfor a Ruby on Rails project (such as GitLab itself). Of course, this is due to\nchange as we're still working on this.\n\n```\n# Refs to skip\nskip_refs: \"deploy*\"\n\n# Run before each script\nbefore_script:\n  - export PATH=$HOME/bin:/usr/local/bin:/usr/bin:/bin\n  - gem install bundler\n  - cp config/database.yml.mysql config/database.yml\n  - cp config/gitlab.yml.example config/gitlab.yml\n  - touch log/application.log\n  - touch log/test.log\n  - bundle install --without postgres production --jobs $(nproc)\n  - \"bundle exec rake db:create RAILS_ENV=test\"\n\n# Parallel jobs, each line is a parallel build\njobs:\n  - script: \"rake spec\"\n    runner: \"ruby,postgres\"\n    name: \"Rspec\"\n  - script: \"rake spinach\"\n    runner: \"ruby,mysql\"\n    name: \"Spinach\"\n    tags: true\n    branches: false\n\n# Parallel deploy jobs\non_success:\n  - \"cap deploy production\"\n  - \"cap deploy staging\"\n```\n\n\u003Ca id=\"update\">\u003C/a>\n\n## UPDATE\n\nDmitriy and Sytse spent some time thinking about the file syntax.\nScripting should be simple and memorable.
That's why we came up with a better proposal:\n\n```\nbefore_script:\n  - gem install bundler\n  - bundle install\n  - bundle exec rake db:create\n\nrspec:\n  test: \"rake spec\"\n  tags:\n    - ruby\n    - postgres\n  only:\n    - branches\n\nspinach:\n  test: \"rake spinach\"\n  tags:\n    - ruby\n    - mysql\n  except:\n    - tags\n\nstaging:\n  deploy: \"cap deploy staging\"\n  tags:\n    - capistrano\n    - debian\n  except:\n    - stable\n\nproduction:\n  deploy:\n    - cap deploy production\n    - cap notify\n  tags:\n    - capistrano\n    - debian\n  only:\n    - master\n    - /^deploy-.*$/\n```\n\n## Contribute\n\nGitLab is nothing without its community.\nContribute to or follow the development in the [GitLab CI repository].\n\n[wrote]: /blog/why-were-replacing-gitlab-ci-jobs-with-gitlab-ci-dot-yml/\n[previous article]: /blog/why-were-replacing-gitlab-ci-jobs-with-gitlab-ci-dot-yml/\n[GitLab CI repository]: https://gitlab.com/gitlab-org/gitlab-ci/commit/c2c9236cde807e98ff9571f8d23ac4def75eb9ba",{"slug":9393,"featured":6,"template":678},"implementing-gitlab-ci-dot-yml","content:en-us:blog:implementing-gitlab-ci-dot-yml.yml","Implementing Gitlab Ci Dot Yml","en-us/blog/implementing-gitlab-ci-dot-yml.yml","en-us/blog/implementing-gitlab-ci-dot-yml",{"_path":9399,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9400,"content":9405,"config":9409,"_id":9411,"_type":16,"title":9412,"_source":17,"_file":9413,"_stem":9414,"_extension":20},"/en-us/blog/how-gitlab-uses-unicorn-and-unicorn-worker-killer",{"title":9401,"description":9402,"ogTitle":9401,"ogDescription":9402,"noIndex":6,"ogImage":2478,"ogUrl":9403,"ogSiteName":692,"ogType":693,"canonicalUrls":9403,"schema":9404},"How GitLab uses Unicorn and unicorn-worker-killer","We just wrote some new documentation on how GitLab uses Unicorn and unicorn-worker-killer, available on doc.gitlab.com. Read it here!","https://about.gitlab.com/blog/how-gitlab-uses-unicorn-and-unicorn-worker-killer","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab uses Unicorn and unicorn-worker-killer\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Vosmaer\"}],\n        \"datePublished\": \"2015-06-05\",\n      }",{"title":9401,"description":9402,"authors":9406,"heroImage":2478,"date":9407,"body":9408,"category":14},[3103],"2015-06-05","\n\nWe just wrote some new documentation on how GitLab uses Unicorn and\nunicorn-worker-killer, available on [doc.gitlab.com](http://doc.gitlab.com/ce/operations/unicorn.html) but\nalso included below. We would love to hear from the community if you have other\nquestions so we can improve this documentation resource!\n\n\u003C!-- more -->\n\nUpdate 19:29 CEST: made link to doc.gitlab.com more specific.\n\n## Understanding Unicorn and unicorn-worker-killer\n\n### Unicorn\n\nGitLab uses [Unicorn](http://unicorn.bogomips.org/), a pre-forking Ruby web\nserver, to handle web requests (web browsers and Git HTTP clients). Unicorn is\na daemon written in Ruby and C that can load and run a Ruby on Rails\napplication; in our case the Rails application is GitLab Community Edition or\nGitLab Enterprise Edition.\n\nUnicorn has a multi-process architecture to make better use of available CPU\ncores (processes can run on different cores) and to have stronger fault\ntolerance (most failures stay isolated in only one process and cannot take down\nGitLab entirely).
On startup, the Unicorn 'master' process loads a clean Ruby\nenvironment with the GitLab application code, and then spawns 'workers' which\ninherit this clean initial environment. The 'master' never handles any\nrequests; that is left to the workers. The operating system network stack\nqueues incoming requests and distributes them among the workers.\n\nIn a perfect world, the master would spawn its pool of workers once, and then\nthe workers handle incoming web requests one after another until the end of\ntime. In reality, worker processes can crash or time out: if the master notices\nthat a worker takes too long to handle a request it will terminate the worker\nprocess with SIGKILL ('kill -9'). No matter how the worker process ended, the\nmaster process will replace it with a new 'clean' process again. Unicorn is\ndesigned to be able to replace 'crashed' workers without dropping user\nrequests.\n\nThis is what a Unicorn worker timeout looks like in `unicorn_stderr.log`. The\nmaster process has PID 56227 below.\n\n```\n[2015-06-05T10:58:08.660325 #56227] ERROR -- : worker=10 PID:53009 timeout (61s > 60s), killing\n[2015-06-05T10:58:08.699360 #56227] ERROR -- : reaped #\u003CProcess::Status: pid 53009 SIGKILL (signal 9)> worker=10\n[2015-06-05T10:58:08.708141 #62538]  INFO -- : worker=10 spawned pid=62538\n[2015-06-05T10:58:08.708824 #62538]  INFO -- : worker=10 ready\n```\n\n### Tunables\n\nThe main tunables for Unicorn are the number of worker processes and the\nrequest timeout after which the Unicorn master terminates a worker process.\nSee the [omnibus-gitlab Unicorn settings\ndocumentation](https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/unicorn.md)\nif you want to adjust these settings.
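\n\nAs a hedged illustration for Omnibus installations (the authoritative key names are in the omnibus-gitlab settings documentation linked above), adjusting these tunables looks roughly like this:\n\n```bash\n# Hedged sketch for an Omnibus install: append the Unicorn tunables to\n# /etc/gitlab/gitlab.rb and reconfigure. Key names as per the omnibus-gitlab\n# settings documentation linked above.\nsudo tee -a /etc/gitlab/gitlab.rb <<'EOF'\nunicorn['worker_processes'] = 4  # roughly one worker per CPU core\nunicorn['worker_timeout'] = 60   # seconds before the master SIGKILLs a worker\nEOF\nsudo gitlab-ctl reconfigure\n```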
\n\n## unicorn-worker-killer\n\nGitLab has memory leaks. These memory leaks manifest themselves in long-running\nprocesses, such as Unicorn workers. (The Unicorn master process is not known to\nleak memory, probably because it does not handle user requests.)\n\nTo make these memory leaks manageable, GitLab comes with the\n[unicorn-worker-killer gem](https://github.com/kzk/unicorn-worker-killer). This\ngem [monkey-patches](http://en.wikipedia.org/wiki/Monkey_patch) the Unicorn\nworkers to do a memory self-check after every 16 requests. If the memory of the\nUnicorn worker exceeds a pre-set limit then the worker process exits. The\nUnicorn master then automatically replaces the worker process.\n\nThis is a robust way to handle memory leaks: Unicorn is designed to handle\nworkers that 'crash' so no user requests will be dropped. The\nunicorn-worker-killer gem is designed to only terminate a worker process _in\nbetween requests_, so no user requests are affected.\n\nThis is what a Unicorn worker memory restart looks like in `unicorn_stderr.log`.\nYou see that worker 4 (PID 125918) is inspecting itself and decides to exit.\nThe threshold memory value was 254802235 bytes, about 250MB. With GitLab this\nthreshold is a random value between 200 and 250 MB. The master process (PID\n117565) then reaps the worker process and spawns a new 'worker 4' with PID\n127549.\n\n```\n[2015-06-05T12:07:41.828374 #125918]  WARN -- : #\u003CUnicorn::HttpServer:0x00000002734770>: worker (pid: 125918) exceeds memory limit (256413696 bytes > 254802235 bytes)\n[2015-06-05T12:07:41.828472 #125918]  WARN -- : Unicorn::WorkerKiller send SIGQUIT (pid: 125918) alive: 23 sec (trial 1)\n[2015-06-05T12:07:42.025916 #117565]  INFO -- : reaped #\u003CProcess::Status: pid 125918 exit 0> worker=4\n[2015-06-05T12:07:42.034527 #127549]  INFO -- : worker=4 spawned pid=127549\n[2015-06-05T12:07:42.035217 #127549]  INFO -- : worker=4 ready\n```\n\nOne other thing that stands out in the log snippet above, taken from\nGitLab.com, is that 'worker 4' was serving requests for only 23 seconds. This\nis a normal value for our current GitLab.com setup and traffic.\n\nThe high frequency of Unicorn memory restarts on some GitLab sites can be a\nsource of confusion for administrators. Usually they are a [red\nherring](http://en.wikipedia.org/wiki/Red_herring).\n",{"slug":9410,"featured":6,"template":678},"how-gitlab-uses-unicorn-and-unicorn-worker-killer","content:en-us:blog:how-gitlab-uses-unicorn-and-unicorn-worker-killer.yml","How Gitlab Uses Unicorn And Unicorn Worker Killer","en-us/blog/how-gitlab-uses-unicorn-and-unicorn-worker-killer.yml","en-us/blog/how-gitlab-uses-unicorn-and-unicorn-worker-killer",{"_path":9416,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9417,"content":9422,"config":9426,"_id":9428,"_type":16,"title":9429,"_source":17,"_file":9430,"_stem":9431,"_extension":20},"/en-us/blog/gitlab-dot-com-outage-on-2015-05-29",{"title":9418,"description":9419,"ogTitle":9418,"ogDescription":9419,"noIndex":6,"ogImage":2478,"ogUrl":9420,"ogSiteName":692,"ogType":693,"canonicalUrls":9420,"schema":9421},"GitLab.com outage on 2015-05-29","A quick summary of the causes and solutions regarding the GitLab.com outage on 2015-05-29","https://about.gitlab.com/blog/gitlab-dot-com-outage-on-2015-05-29","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab.com outage on 2015-05-29\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Vosmaer\"}],\n        \"datePublished\": \"2015-06-04\",\n      }",{"title":9418,"description":9419,"authors":9423,"heroImage":2478,"date":9424,"body":9425,"category":14},[3103],"2015-06-04","\n\nGitLab.com suffered an outage from 2015-05-29 01:00 to 2015-05-29 02:34 (times in UTC).\nIn this blog post we will discuss what happened, why it took so long to recover the service, and what we are doing to reduce the likelihood and impact of such incidents.\n\n\u003C!-- more -->\n\n## Background\n\nGitLab.com is provided and maintained by the team of GitLab B.V., the company behind GitLab.\nOn 2015-05-02 we performed a major infrastructure upgrade, moving GitLab.com from a single server to a small cluster of servers, consisting of a load balancer (running HAproxy), three workers (NGINX/Unicorn/Sidekiq/gitlab-shell) and a backend server (PostgreSQL/Redis/NFS).\nThis new infrastructure configuration improved the responsiveness of GitLab.com, at the expense of having more moving parts.\n\nGitLab.com is backed up using Amazon EBS snapshots.\nTo protect against inconsistent snapshots our backup script 'freezes' the filesystem on the backend server with `fsfreeze` prior to making EBS snapshots, and 'unfreezes' the filesystem immediately after.
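\n\nIn outline, the freeze/snapshot/unfreeze pattern looks like this (a hedged sketch with a hypothetical volume ID, not our actual backup script):\n\n```bash\n# Hedged sketch of the freeze/snapshot/unfreeze pattern (the volume ID is\n# hypothetical; this is not our actual backup script).\nfsfreeze --freeze /var/opt/gitlab\naws ec2 create-snapshot --volume-id vol-12345678 --description \"gitlab.com backup\"\n# If the script dies before the next line runs, the filesystem stays frozen\n# and every write to it will hang.\nfsfreeze --unfreeze /var/opt/gitlab\n```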
\n\n## Timeline\n\n_Comments in italics below are written with the benefit of hindsight._\n\n- 1:00 The GitLab.com backup script is activated by Cron on the backend server.\n  _For unknown reasons, the backup script hangs/crashes before or during the 'unfreeze' of the filesystem holding all user data._\n- 1:07 Our on-call engineer is paged by [Pingdom](http://status.gitlab.com).\n  The on-call engineer investigates the worker servers but is unable to find the problem there.\n  _The issue was on the backend server, not on the workers._\n- 1:30 The on-call engineer decides to call in more help.\n  The other team members with the access and knowledge to resolve the issue are all in Europe at this time, where it is 3:30/4:30am.\n- 1:45 A second engineer in Europe has been woken up and takes the lead on the investigation of the outage.\n  More workers are rebooted because they appear to be stuck.\n  It becomes apparent that the workers cannot mount the NFS share which holds all Git repository data.\n- 1:51 One of the engineers notices that the load on the backend server is more than 150. _A normal value would be less than 5._\n- 2:10 The engineers give up on running commands on the workers to bring the NFS share back, and start investigating the backend server.\n  The engineers discuss whether they should reboot the backend server but they are unsure if it is safe, given that this setup is fairly new.\n- 2:21 The engineers reboot the backend server.\n  The reboot takes a long time.\n  _The AWS 'reboot' command first tries a soft reboot, and only does a hard reboot after a 4-minute timeout.\n  The soft reboot probably hung when it tried to shut down services that were trying to write to the 'frozen' disk._\n- 2:30 The backend server has rebooted and the engineers regain SSH access to it.\n  The worker servers are able to mount the NFS share now but GitLab.com is still not functioning because the Postgres database server is not responding.\n  One of the engineers restarts Postgres on the backend server.\n  _It may have been that Postgres was still busy performing crash recovery._\n- 2:34 GitLab.com is available again.\n\n## Root causes\n\nAlthough we cannot explain _what_ went wrong with the backup script, it is hard to avoid the conclusion that _something_ did go wrong with it.\n\nThe length of the outage was caused by insufficient training and documentation for our on-call engineers following the infrastructure upgrade rolled out on May 2nd.\n\n## Next steps\n\nWe have removed the freeze/unfreeze steps from our backup script.\nBecause this (theoretically) increases the risk of occasional corrupt backups we have added a second backup strategy for our SQL data.\nIn the future we would like to have automatic validation of our GitLab.com backups.\n\nThe day before this incident we had decided that training was our most important priority.\nWe have started to do regular operations drills in one-on-one sessions with all of our on-call engineers.\n",{"slug":9427,"featured":6,"template":678},"gitlab-dot-com-outage-on-2015-05-29","content:en-us:blog:gitlab-dot-com-outage-on-2015-05-29.yml","Gitlab Dot Com Outage On 2015 05 29",
29","en-us/blog/gitlab-dot-com-outage-on-2015-05-29.yml","en-us/blog/gitlab-dot-com-outage-on-2015-05-29",{"_path":9433,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9434,"content":9439,"config":9443,"_id":9445,"_type":16,"title":9446,"_source":17,"_file":9447,"_stem":9448,"_extension":20},"/en-us/blog/version-check",{"title":9435,"description":9436,"ogTitle":9435,"ogDescription":9436,"noIndex":6,"ogImage":2478,"ogUrl":9437,"ogSiteName":692,"ogType":693,"canonicalUrls":9437,"schema":9438},"Version Check Functionality","We're working on a version check function for GitLab to reduce the problem of outdated servers which can be a security problem and provide a bad user experience","https://about.gitlab.com/blog/version-check","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Version Check Functionality\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2015-05-07\",\n      }",{"title":9435,"description":9436,"authors":9440,"heroImage":2478,"date":9441,"body":9442,"category":14},[2269],"2015-05-07","\n\nWe're working on a version check function for GitLab to reduce the problem of outdated servers.\nThese servers are a security problem, provide a bad user experience and\nlead to issues being created with problems that have already been solved.\nBy making outdated installations visible to its users we hope that people will upgrade sooner.\n\n\u003C!--more-->\n\n## How it'll work\n\nThe version check will work in the following way. The `/help` page of GitLab will\nload an image from _version.gitlab.com_. This image will show green for an\nup to date version, yellow for an out of date version and red for a missing security update.\n\n![No update necessary](https://about.gitlab.com/images/version_check/green.png)\n![New version out!](https://about.gitlab.com/images/version_check/orange.png)\n![Update ASAP](https://about.gitlab.com/images/version_check/red.png)\n\nThe image requests parameters requests will contain the GitLab version and the server hostname.\nWe'll store each request with a timestamp, the GitLab version and the server hostname.\nWe will not store the user ip-address.\n\nWe will send the server hostname to have more information about where and how GitLab is used.\nLoading external images is similar to how the gravatar images of users are used.\n\n### Opt-out\n\nJust like the gravatar images you will be able to turn off the functionality\nif you don't want your GitLab server to connect outside the firewall.\nThe version check functionality can be disabled in the application settings.\n\n## Trade-off\n\nProviding the new package server and the version check server requires\nconstant maintenance and operational capacity.\nGetting better insight into where and how GitLab is used\nwill help us improve GitLab for everyone.\n\nWe realize that it sending the server\nhostname by default is not a trivial action and not everyone will be happy about this.\nWe think that ensuring the sustainability of GitLab package server and\nversion check services makes it a good trade-off.\nThere will always be an option to turn this behavior off.\n\nPlease let us know what you think about the above plan in the comments.\n\n## Update\n\nWe decided against sending the hostname in the url of the picture request.\nBut the https picture request itself will have a HTTP referer header.\nWe can use that to see where and how GitLab is used.\nWe will still not store the 
\n",{"slug":9444,"featured":6,"template":678},"version-check","content:en-us:blog:version-check.yml","Version Check","en-us/blog/version-check.yml","en-us/blog/version-check",{"_path":9450,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9451,"content":9456,"config":9460,"_id":9462,"_type":16,"title":9463,"_source":17,"_file":9464,"_stem":9465,"_extension":20},"/en-us/blog/unofficial-gitlab-ci-runner",{"title":9452,"description":9453,"ogTitle":9452,"ogDescription":9453,"noIndex":6,"ogImage":2478,"ogUrl":9454,"ogSiteName":692,"ogType":693,"canonicalUrls":9454,"schema":9455},"Unofficial GitLab CI Runner","GitLab CI Multi-purpose Runner is yet another CI runner, but this time written in Go with a vast number of features that leverage all the latest technologies.","https://about.gitlab.com/blog/unofficial-gitlab-ci-runner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Unofficial GitLab CI Runner\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kamil Trzciński\"}],\n        \"datePublished\": \"2015-04-17\",\n      }",{"title":9452,"description":9453,"authors":9457,"heroImage":2478,"date":9458,"body":9459,"category":14},[9160],"2015-04-17","\n\n[GitLab CI Multi-purpose Runner](https://gitlab.com/gitlab-org/gitlab-runner) is yet another CI runner, but this time written in Go, with a vast number of features that leverage all the latest technologies. It's an unofficial project made by me, Kamil Trzciński, with love for GitLab CI, to help solve some problems with the current runner and make the use of CI really simple and secure.\n\n\u003C!-- more -->\n\n## Why was it created?\n\nI created this project because I needed a runner that can use virtualization technology (Docker and Parallels for macOS) to build our ([Polidea](https://www.polidea.com/)) projects. The main reason is that the official GitLab-CI-Runner is a very simple application written in Ruby that works well in quite basic setups. You can think of it as a reference implementation of what a bare runner can look like. It's distributed as source code or as a simple omnibus package to install on one of the supported Linux distributions. However, there are some areas where that makes it quite hard to use:\n\n* The runner can only run one build at a time. If you want to run more, you either set up a new server or create an additional user to run the jobs.\n* What is important is that the official runner always runs projects in the server's shell. This makes it really hard to test projects using different versions of Ruby or any other dependencies. It also makes the build environment dirty and builds are not really 100% reliable. This collides with the recent approach of always having a clean build environment.\n* The runner works only on Linux-based platforms and it's not really an easy task to make the runner a service that runs at system start. Additional hacks are required to make it run on macOS. There is no support for Windows.\n* It's quite hard to set up the next server with all the dependencies required to build a project.\n* It's quite hard to perform some administrative tasks with the official runner.\n\n## Why do I need it?\n\nThe GitLab-CI Multi-purpose Runner is a single binary that you can put on any kind of machine. It is really easy to set up as a service and can work with multiple projects and multiple GitLab CI coordinators.
With support for Docker it makes it really easy to set up a build environment with different versions of packages.\n\n## How to install?\n\nIt's really simple. There are multiple ways to install the GitLab-CI Multi-purpose Runner:\n\n* [Install using Debian/Ubuntu/CentOS/RedHat package](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/install/linux-repository.md)\n* [Install on macOS](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/install/osx.md)\n* [Install on Windows](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/install/windows.md)\n* [Install as Docker Service](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/install/docker.md)\n\n## How to use it?\n\n```bash\n$ cd ~gitlab_ci_multi_runner\n$ gitlab-ci-multi-runner register\n```\n\nYou will be asked how it should be configured. Once you've done that, you are pretty much ready to build projects.\n\n## Maybe use Docker?\n\nYou can also use Docker to create a runner with specific dependencies. What is important is that every time your project is built, it runs in a clean environment without any leftovers from previous builds. With the simple commands below you don't have to install any dependencies, because Docker will download everything required to run your tests.\n\n```bash\n$ cd ~gitlab_ci_multi_runner\n$ gitlab-ci-multi-runner register \\\n  --non-interactive \\\n  --url \"https://ci.gitlab.com/\" \\\n  --registration-token \"REGISTRATION_TOKEN\" \\\n  --description \"Ruby 2.1 with MySQL\" \\\n  --executor \"docker\" \\\n  --docker-image ruby:2.1 --docker-mysql latest\n\n$ gitlab-ci-multi-runner register \\\n  --non-interactive \\\n  --url \"https://ci.gitlab.com/\" \\\n  --registration-token \"REGISTRATION_TOKEN\" \\\n  --description \"Python 3.4 with MySQL\" \\\n  --executor \"docker\" \\\n  --docker-image python:3.4 --docker-mysql latest\n```\n\nExample integrations for GitLab CE and GitLab CI can be found here:\n\n* [Integrate GitLab CE](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/examples/gitlab.md)\n* [Integrate GitLab CI](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/examples/gitlab-ci.md)\n\n## Why Go?\n\nGo is a pretty young language, already used by some major brands (Docker). It has proven to be stable and well supported, with a pretty rich standard library. However, the most important thing is that the Go compiler can produce a single binary without any dependencies for Linux, macOS, FreeBSD, NetBSD, OpenBSD, Plan 9 and Microsoft Windows and the i386, amd64, ARM and IBM POWER processor architectures. The binary is of reasonable size (around 10MB in the case of GitLab-CI-Multi-Runner).\nWhat is important is that this is the one project to rule them all. With Go's multiplatform approach it is really simple to build projects that can run on all platforms, and in most cases it's sufficient to distribute the application binary only, because it has all the required bits to run the project.
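\n\nAs a hedged illustration of that claim (generic `go build` usage, not the runner's actual release script), cross-compiling one Go source tree for several platforms is just a matter of setting two environment variables:\n\n```bash\n# Generic Go cross-compilation (illustrative; not the runner's release script).\n# One source tree, one command per target platform/architecture.\nGOOS=linux   GOARCH=amd64 go build -o out/runner-linux-amd64\nGOOS=darwin  GOARCH=amd64 go build -o out/runner-darwin-amd64\nGOOS=windows GOARCH=386   go build -o out/runner-windows-386.exe\n```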
\n\n## Features\n\n* Allows you to run:\n - multiple jobs concurrently\n - multiple tokens with multiple servers (even per-project)\n - a limited number of concurrent jobs per token\n* Jobs can be run:\n - locally\n - using a Docker container\n - using a Docker container and executing jobs over SSH\n - using dynamically provisioned Parallels VM machines\n - connecting to a remote SSH server\n* Is written in Go and is distributed as a single binary without any other requirements\n* Supports Bash, Windows Batch and Windows PowerShell\n* Works on Ubuntu, Debian, macOS and Windows (and anywhere you can run Docker)\n* Allows you to customize the job running environment\n* Automatic configuration reload without restart\n* Easy-to-use setup with support for all running environments\n* Support for caching when using Docker containers\n* Support for making the runner a service on Linux, macOS and Windows\n* Current jobs are aborted when the service receives an interrupt signal, e.g. when shutting down the server.\n\n## Is it stable?\n\nThis runner has been in use for quite some time already to build our mobile projects at [Polidea](https://www.polidea.com/). Polidea is a mobile software house that creates and develops apps for a variety of clients. We like customizing and improving our processes as much as possible to make the development process as smooth as it gets – and to create great tools for everyone. We will soon be publishing a series of posts on how we use GitLab CI to build projects for Android and iOS devices.\n\n## This project needs your help\n\nI hope this got you interested. Please try to run it and post some comments on how it works for you. You can also join the discussion on [GitLab.com](https://gitlab.com/gitlab-org/omnibus-gitlab-runner/issues/7#note_1074777) about whether this runner should become the official one. Thanks!\n",{"slug":9461,"featured":6,"template":678},"unofficial-gitlab-ci-runner","content:en-us:blog:unofficial-gitlab-ci-runner.yml","Unofficial Gitlab Ci Runner","en-us/blog/unofficial-gitlab-ci-runner.yml","en-us/blog/unofficial-gitlab-ci-runner",{"_path":9467,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9468,"content":9474,"config":9478,"_id":9480,"_type":16,"title":9481,"_source":17,"_file":9482,"_stem":9483,"_extension":20},"/en-us/blog/moving-all-your-data",{"title":9469,"description":9470,"ogTitle":9469,"ogDescription":9470,"noIndex":6,"ogImage":9471,"ogUrl":9472,"ogSiteName":692,"ogType":693,"canonicalUrls":9472,"schema":9473},"Moving all your data, 9TB edition","At GitLab B.V. we are working on an infrastructure upgrade to give more CPU power and storage space to GitLab.com. Learn more here!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684774/Blog/Hero%20Images/van.jpg","https://about.gitlab.com/blog/moving-all-your-data","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Moving all your data, 9TB edition\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Vosmaer\"}],\n        \"datePublished\": \"2015-03-09\",\n      }",{"title":9469,"description":9470,"authors":9475,"heroImage":9471,"date":9476,"body":9477,"category":14},[3103],"2015-03-09","\nAt GitLab B.V.
we are working on an infrastructure upgrade to give more CPU\npower and storage space to GitLab.com. (We are currently still running on a\n[single server](/blog/the-hardware-that-powers-100k-git-repos/).) As a\npart of this upgrade we wanted to move gitlab.com from our own dedicated\nhardware servers to an AWS data center 400 kilometers away. In this blog post\nI will tell you how I did that and what challenges I had to overcome. An epic\nadventure of hand-rolled network tunnels, advanced DRBD features and streaming\n9TB of data through SSH pipes!\n\n\u003C!-- more -->\n\n## What did I have to move?\n\nIn our current setup we run a stock GitLab Enterprise Edition omnibus package,\nwith a single big filesystem mounted at `/var/opt/gitlab`. This\nfilesystem holds all the user data hosted on gitlab.com: Postgres and Redis\ndatabase files, user uploads, and a lot of Git repositories. All I had to do\nto move this data to AWS was to move the files on this filesystem. Sounds simple\nenough, does it not?\n\nSo do we move the files, or the filesystem itself? This is an easy question to\nanswer. Moving the files using something like Rsync is not an option because it\nis just too slow. We do file-based backups every week where we take a block\ndevice snapshot, mount the snapshot and send it across with Rsync. That\ncurrently takes over 24 hours, and 24 hours of downtime while we move\ngitlab.com is not a nice idea. Now you might ask: what if you Rsync once to\nprepare, take the server offline, and then do a quick Rsync just to catch up?\nThat would still take hours, just for Rsync to walk through all the files and\ndirectories on disk. No good.\n\nWe have faced and solved this same problem in the past, when the amount of data\nwas 5 times smaller. (Rsync was not an option even then.) What I did at that\ntime was to use DRBD to move not just the files themselves, but the whole\nfilesystem they sit on. This time around DRBD again seemed like the best\nsolution for us. It is not the fastest way to move a lot of data, but what\nis great about it is that you can keep using the filesystem while the data is\nbeing moved, and changes will get synchronized continuously. No downtime for\nour users! (Except maybe 5 minutes at the start to set up the sync.)\n\n## What is DRBD?\n\n[DRBD](http://www.drbd.org) is a system that can create a virtual hard drive\n(block device) on a Linux computer that gets mirrored across a network\nconnection to a second Linux computer. Both computers give a 'real' hard drive\nto DRBD, and DRBD keeps the contents of the real hard drives the same across\nboth computers via the network. One of the two computers gets a virtual hard\ndrive from DRBD, which shows the contents of the real hard drive underneath. If\nyour first computer crashes, you can 'plug in' the virtual hard drive on the\nsecond computer in a matter of seconds, and all your data will still be there\nbecause DRBD kept the 'real' hard drives in sync for you. You can even have the\ntwo computers that are linked by DRBD sit in different buildings, or on\ndifferent continents. Up until our move to AWS, we were using DRBD to protect\nagainst hardware failure on the server that runs gitlab.com: if such a failure\nwould happen, we could just plug in the virtual hard drive with the user data\ninto our stand-by server. In our new data center, the hosting provider (Amazon\nWeb Services) has its own solution for plugging virtual hard drives in and\nout, called Elastic Block Storage, so we are no longer using DRBD as a virtual\nhard drive. From an availability standpoint this is not better or worse, but\nusing EBS drives does make it a lot easier for us to make backups, because now\nwe can just store snapshots (no more Rsync).
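\n\nFor readers who have not used DRBD: a minimal two-node resource definition of the kind described above might look like this (a hedged sketch; the device, disk and IP values are illustrative, not our production config, though linus and monty are the real host names):\n\n```bash\n# Hedged sketch of a minimal two-node DRBD resource (illustrative values;\n# linus and monty are the host names from this post).\ncat > /etc/drbd.d/gitlab_data.res <<'EOF'\nresource gitlab_data {\n  device    /dev/drbd0;            # the virtual hard drive DRBD exposes\n  disk      /dev/gitlab_vg/drbd;   # the 'real' disk each node contributes\n  meta-disk internal;\n  on linus { address 10.0.0.1:7789; }\n  on monty { address 10.0.0.2:7789; }\n}\nEOF\n```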
\n\n## Using DRBD for a data migration\n\nAlthough DRBD is not really made for this purpose, I felt confident using DRBD\nfor the migration because I had done it before for a migration between data\ncenters. At that time we were moving across the Atlantic Ocean; this time we\nwould only be moving from the Netherlands to Germany. However, the last time\nwe used DRBD only as a one-off tool. In our pre-migration setup, we were\nalready using DRBD to replicate the filesystem between two servers in the same\nrack. DRBD only lets you share a virtual hard drive between two computers, so\nhow do we now send the data to a _third_ computer in the new data center?\n\nLuckily, DRBD actually has a trick up its sleeve to deal with this, called\n'stacked resources'. This means that our old servers ('linus' and 'monty')\nwould share a virtual hard drive called 'drbd0', and that whichever of the two\nhas the 'drbd0' virtual hard drive plugged in gets to use 'drbd0' as the 'real'\nhard drive underneath a second virtual hard drive, called 'drbd10', which is\nshared with the new server ('theo'). Also see the picture below.\n\n![Stacked DRBD replication](https://about.gitlab.com/images/drbd/drbd-three-nodes.png)\n\nIf linus were to malfunction, we could attach drbd0 (the blue virtual hard drive)\non monty and keep gitlab.com going. The 'green' replication (to get the data to\ntheo) would also be able to continue, even after a failover to monty.\n\n## Networking\n\nI liked the picture above, so 'all' I had to do was set it up. That ended up\ntaking a few days, just to set up a test environment, and to figure out how to\ncreate a network tunnel for the green traffic. The network tunnel needed to\nhave a movable endpoint depending on whether linus or monty was primary. We\nalso needed the tunnel because DRBD is not compatible with the [Network Address\nTranslation](http://en.wikipedia.org/wiki/Network_address_translation) used by\nAWS. DRBD assumes that whenever a node listens on an IP address, it is also\nreachable for its partner node at that IP address. On AWS on the other hand, a\nnode will have one or more internal IP addresses, which are distinct from its\n_public_ IP address.\n\nWe chose to work around this with an [IPIP\ntunnel](http://en.wikipedia.org/wiki/IP_in_IP) and manually keyed IPsec\nencryption. Previous experiments indicated that this gave us the best network\nthroughput compared to OpenVPN and GRE tunnels.\n\nTo set up the tunnel I used a shell script that was kept in sync on all three\nservers involved in the migration by Chef.\n\n```\n#!/bin/sh\n# Network tunnel configuration script used by GitLab B.V. to migrate data from\n# Delft to Frankfurt\n\nset -u\n\nPATH=/usr/sbin:/sbin:/usr/bin:/bin\n\nfrankfurt_public=54.93.71.23\nfrankfurt_replication=172.16.228.2\ntest_public=54.152.127.180\ntest_replication=172.16.228.1\ndelft_public=62.204.93.103\ndelft_replication=172.16.228.1\n\ncreate_tunipip() {\n  if ! ip tunnel show | grep -q tunIPIP ; then\n    echo Creating tunnel tunIPIP\n    ip tunnel add tunIPIP mode ipip ttl 64 local \"$1\" remote \"$2\"\n  fi\n}\n
\nadd_tunnel_address() {\n  if ! ip address show tunIPIP | grep -q \"$1\" ; then\n    ip address add \"$1/32\" peer \"$2/32\" dev tunIPIP\n  fi\n}\n\ncase $(hostname) in\n  ip-10-0-2-9)\n    create_tunipip 10.0.2.140 \"${frankfurt_public}\"\n    add_tunnel_address \"${test_replication}\" \"${frankfurt_replication}\"\n    ip link set tunIPIP up\n    ;;\n  ip-10-0-2-245)\n    create_tunipip 10.0.2.11 \"${frankfurt_public}\"\n    add_tunnel_address \"${test_replication}\" \"${frankfurt_replication}\"\n    ip link set tunIPIP up\n    ;;\n  ip-10-1-0-52|theo.gitlab.com)\n    create_tunipip 10.1.0.52 \"${delft_public}\"\n    add_tunnel_address \"${frankfurt_replication}\" \"${delft_replication}\"\n    ip link set tunIPIP up\n    ;;\n  linus|monty)\n    create_tunipip \"${delft_public}\" \"${frankfurt_public}\"\n    add_tunnel_address \"${delft_replication}\" \"${frankfurt_replication}\"\n    ip link set tunIPIP up\n    ;;\nesac\n```\n\nThis script was configured to run on boot. Note that it covers our Delft nodes\n(linus and monty, the then-current production), the node we were migrating to in\nFrankfurt (theo), and two AWS test nodes that were part of a staging setup. We\nchose the AWS Frankfurt (Germany) data center because of its geographic\nproximity to Delft (The Netherlands).\n\nWe configured IPsec with `/etc/ipsec-tools.conf`. An example for the 'origin'\nconfiguration would be:\n\n```\n#!/usr/sbin/setkey -f\n\n# Configuration for 172.16.228.1\n\n# Flush the SAD and SPD\nflush;\nspdflush;\n\n# Attention: use these keys for testing purposes only!\n# Generate your own keys!\n\n# AH SAs using 128 bit long keys\n# Fill in your keys below!\nadd 172.16.228.1 172.16.228.2 ah 0x200 -A hmac-md5 0xfoobar;\nadd 172.16.228.2 172.16.228.1 ah 0x300 -A hmac-md5 0xbarbaz;\n\n# ESP SAs using 192 bit long keys (168 + 24 parity)\n# Fill in your keys below!\nadd 172.16.228.1 172.16.228.2 esp 0x201 -E 3des-cbc 0xquxfoo;\nadd 172.16.228.2 172.16.228.1 esp 0x301 -E 3des-cbc 0xbazqux;\n\n# Security policies\n# outbound traffic from 172.16.228.1 to 172.16.228.2\nspdadd 172.16.228.1 172.16.228.2 any -P out ipsec esp/transport//require ah/transport//require;\n\n# inbound traffic from 172.16.228.2 to 172.16.228.1\nspdadd 172.16.228.2 172.16.228.1 any -P in ipsec esp/transport//require ah/transport//require;\n```\n\nGetting the networking to this point took quite some work. For starters, we did\nnot have a staging environment similar enough to our production environment, so\nI had to create one for this occasion.\n\nOn top of that, to model our production setup, I had to use an AWS 'Virtual\nPrivate Cloud', which was new technology for us. It took a while before I\nfound some [vital information about using multiple IP\naddresses](http://engineering.silk.co/post/31923247961/multiple-ip-addresses-on-amazon-ec2)\nthat was not obvious from the AWS documentation: if you want to have two public\nIP addresses on an AWS VPC node, you need to put two corresponding private IP\naddresses on one 'Elastic Network Interface', instead of creating two network\ninterfaces with one private IP each.\n\n## Configuring three-way DRBD replication\n\nWith the basic networking figured out, the next thing I had to do was to adapt\nour production failover script so that we would maintain redundancy while migrating\nthe data. 'Failover' is a procedure where you move a service (gitlab.com) over\nto a different computer after a failure. Our failover procedure is managed by a\nscript.
My goal was to make sure that if one of our production servers failed,\nany teammate of mine on pager duty would be able to restore the gitlab.com\nservice using our normal failover procedure. That meant I had to update the\nscript to use the new three-way DRBD configuration.\n\nI certainly got a little more familiar with tcpdump (`tcpdump -n -i\nINTERFACE`), having multiple layers of firewalls\n([UFW](http://en.wikipedia.org/wiki/Uncomplicated_Firewall) and AWS [Security\nGroups](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html)),\nand how to get any useful log messages from DRBD:\n\n```\n# Monitor DRBD log messages\nsudo tail -f /var/log/messages | grep -e drbd -e d-con\n```\n\nI later learned that I had actually deployed a new version of the failover script\nwith a bug in it that could have confused the hell out of my\nteammates had they had to use it under duress. Luckily we never actually needed\nthe failover procedure during the time the new script was in production.\n\nBut, even though I was introducing new complexity and hence bugs into our\nfailover tooling, I did manage to learn and try out enough things to bring this\nproject to a successful conclusion.\n\n## Enabling the DRBD replication\n\nThis part was relatively easy. I just had to grow the DRBD block device\n'drbd0' so that it could accommodate the new stacked (inner) block device\n'drbd10' without having to shrink our production filesystem. Because drbd0 was\nbacked by LVM and we had some space left, this was a matter of invoking\n`lvextend` and `drbdadm resize` on both our production nodes.
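\n\nIn terms of concrete commands, that grow step looks roughly like this (a hedged sketch; the size is made up, though the volume group and resource names appear later in this post):\n\n```bash\n# Hedged sketch of the grow step (the +200G figure is made up; gitlab_vg\n# and gitlab_data are the names used elsewhere in this post).\nsudo lvextend -L +200G /dev/gitlab_vg/drbd  # grow the backing logical volume\nsudo drbdadm resize gitlab_data             # let drbd0 expand into the new space\n```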
\n\nThe step after this was the first one where I had to take gitlab.com offline.\nIn order to 'activate' drbd10 and start the synchronization, I had to unmount\n`/dev/drbd0` from `/var/opt/gitlab` and mount `/dev/drbd10` in its place. This\ntook less than 5 minutes. After this the actual migration was under way!\n\n## Too slow\n\nAt this point I was briefly excited to be able to share some good news with the\nrest of the team. While staring at the DRBD progress bar for the\nsynchronization, however, I started to realize that the progress bar was telling\nme that the synchronization would take about 50-60 days at 2MB/s.\n\nThis prognosis was an improvement over what we would expect based on our\nprevious experience moving 1.8TB from North Virginia (US) to Delft (NL) in\nabout two weeks (across the Atlantic Ocean!). If one were to extrapolate that\nrate you would expect moving 9TB to take 70 days. We were disappointed\nnonetheless, because we were hoping that we would gain more throughput by moving\nover a shorter distance this time around (Delft and Frankfurt are about 400km\napart).\n\nThe first thing I started looking into at this point was whether we could\nsomehow make better use of the network bandwidth at our disposal. Sending fake\ndata (zeroes) over the (encrypted) IPIP tunnel (`dd if=/dev/zero | nc remote_ip\n1234`) we could get about 17 MB/s. By disabling IPsec (not really an option as\nfar as I am concerned) we could increase that number to 40 MB/s.\n\nThe only conclusion I could come to was that we were not reaching our maximum\nbandwidth potential, but that I had no clue how to coax more speed out of the\nDRBD sync. Luckily I recalled reading about another magical DRBD feature.\n\n## Bring out the truck\n\nThe solution suggested by the DRBD documentation for situations like ours is\ncalled ['truck based\nreplication'](https://drbd.linbit.com/users-guide/s-using-truck-based-replication.html).\nInstead of synchronizing 9TB of data, we would be telling DRBD to mark a point\nin time, take a full disk snapshot, move the snapshot to the new location (as a\nbox full of hard drives in a truck if needed), and then tell DRBD to get the\ndata at the new location up to date. During that 'catching-up' sync, DRBD would\nonly be resending those parts of the disk that actually changed since we marked\nthe point in time earlier. Because our users would not have written 9TB of new\ndata while the 'disks' were being shipped, we would have to sync much less than\n9TB.\n\n![Full replication versus 'truck' replication](https://about.gitlab.com/images/drbd/drbd-truck-sync.png)\n\nIn our case I would not have to use an actual truck; while testing the network\nthroughput between our old and new server I found that I could stream zeroes\nthrough SSH at about 35MB/s.\n\n```\ndd if=/dev/zero bs=1M count=100 | ssh theo.gitlab.com dd of=/dev/null\n```\n\nAfter doing some testing with the leftover two-node staging setup I built\nearlier to figure out the networking, I felt I could make this work. I followed\nthe steps in the DRBD documentation, made an LVM snapshot on the active origin\nserver, and started sending the snapshot to the new server with the following\nscript.\n\n```\n#!/bin/sh\nblock_count=100\nblock_size='8M'\nremote='54.93.71.23'\n\nsend_blocks() {\n  for skip in $(seq $1 ${block_count} $2) ; do\n    echo \"${skip}   $(date)\"\n    sudo dd if=/dev/gitlab_vg/truck bs=${block_size} count=${block_count} skip=${skip} status=noxfer iflag=fullblock \\\n    | ssh -T ${remote} sudo dd of=/dev/gitlab_vg/gitlab_com bs=${block_size} count=${block_count} seek=${skip} status=none iflag=fullblock\n  done\n}\n\ncheck_blocks() {\n  for skip in $(seq $2 ${block_count} $3) ; do\n    printf \"${skip}   \"\n    sudo dd if=$1 bs=${block_size} count=${block_count} skip=${skip} iflag=fullblock | md5sum\n  done\n}\n\ncase $1 in\n  send)\n    send_blocks $2 $3\n    ;;\n  check)\n    check_blocks $2 $3 $4\n    ;;\n  *)\n    echo \"Usage: $0 (send START END) | (check BLOCK_DEVICE START END)\"\n    exit 127\nesac\n```\n\nBy running this script in a [screen](http://www.gnu.org/software/screen/)\nsession I was able to copy the LVM snapshot `/dev/gitlab_vg/truck` from the old\nserver to the new server in about 3.5 days, 800 MB at a time. The 800MB number\nwas a bit of a coincidence, stemming from the recommendation of our Dutch\nhosting provider [NetCompany](http://www.netcompany.nl/) to use 8MB `dd` blocks. Also\ncoincidentally, the total disk size was divisible by 8MB. If you have an eye\nfor system security you might notice that the script needed root\nprivileges both on the source server and, via short-lived unattended SSH sessions,\non the remote server (`| ssh sudo ...`). This is not a normal thing for us to\ndo, and my colleagues got spammed by warning messages about it while this\nmigration was in progress.\n\nBecause I am a little paranoid, I was running a second instance of this script\nin parallel with the sync, calculating MD5 checksums of all the\nblocks that were being sent across the network. By calculating the same\nchecksums on the migration target I could gain sufficient confidence that all\ndata made it across without errors.
If there had been any errors, the script would\nhave made it easy to re-send an individual 800MB block.\n\nAt this point my spirits were lifting again and I told my teammates we would\nprobably need one extra day after the 'truck' stage before we could start using\nthe new server. I did not know yet that 'one day' would become 'one week'.\n\n## Shipping too much data\n\nAfter moving the big snapshot across the network with\n[dd](http://en.wikipedia.org/wiki/Dd_%28Unix%29) and SSH, the next step would\nbe to 'just turn DRBD on and let it catch up'. But all of a sudden that did not work!\nIt took me a while to realize that the problem was that while trucking,\nI had sent _too much_ data to the new server (theo). If you recall the picture\nI drew earlier of the three-way DRBD replication then you can see that the goal\nwas to replicate the 'green box' from the old servers to the new server, while\nletting the old servers keep sharing the 'blue box' for redundancy.\n\n![Blue box on the left, green box on the\nright](https://about.gitlab.com/images/drbd/drbd-too-much-data.png)\n\nBut I had just sent a snapshot of the _blue_ box to theo (the server on the\nright), not just the green box. DRBD refused to come back up on theo,\nbecause it was expecting the green box, not the blue box (containing the green\nbox). More precisely, my disk on the new server contained metadata for drbd0 as\nwell as drbd10. DRBD finds its metadata by starting at the end of the disk and\nwalking backwards. Because of that, it was not seeing the drbd10 (green)\nmetadata on theo.\n\n![Two metadata blocks](https://about.gitlab.com/images/drbd/drbd-two-metadata-blocks.png)\n\nThe first thing I tried was to shrink the disk (with\n[LVM](http://en.wikipedia.org/wiki/Logical_Volume_Manager_%28Linux%29)) so that\nthe blue block at the end would fall off. Unfortunately, you can only grow and\nshrink LVM disks in fixed steps (4MB steps in our case), and those steps did\nnot align with where the drbd10 metadata (green box) ended on disk.\n\nThe next thing I tried was to erase the blue block. That would leave DRBD\nunable to find any metadata, because DRBD metadata must sit at the end of the\ndisk. To cope with that I tried to trick DRBD into thinking it was in the\nmiddle of a disk resize operation. By manually creating a doctored\n`/var/lib/drbd/drbd-minor-10.lkbd` file, used by DRBD when it does a\n(legitimate) disk resize, I was pointing it to where I thought it could find\nthe green block of drbd10 metadata. To be honest this required more disk sector\narithmetic than I was comfortable with. Comfortable or not, I never got this\nprocedure to work without a few screens full of scary DRBD error messages so I\ndecided to call our first truck expedition a bust.\n\n## One last try\n\nWe had just spent four days waiting for a 9TB chunk of data to be transported\nto our new server only to find out that it was getting rejected by DRBD. The\nonly option that seemed left to us was to sit back and wait 50-60 days for a\nregular DRBD sync to happen. There was just this one last thing I wanted to try\nbefore giving up. The stumbling block at this point was getting DRBD on theo to\nfind the metadata for the drbd10 disk. From reading the documentation, I knew\nthat DRBD has metadata export and import commands.
So, what if we took a new\nLVM snapshot in Delft, took the disk offline and exported its metadata, and then\non the other end did a metadata import with the proper DRBD import command\n(instead of me writing zeroes to the disk and lying to DRBD about being in the\nmiddle of a resize)? This would require us to redo the truck dance and wait\nfour days, but four days was still better than 50 days.\n\nUsing the staging setup I built at the start of this process (a good time\ninvestment!) I created a setup that allowed me to test three-way replication\nand truck-based replication at the same time. Without having to do any\narithmetic I came up with an intimidating but reliable sequence of commands to\n(1) initiate truck-based replication and (2) export the DRBD metadata.\n\n```\nsudo lvremove -f gitlab_vg/truck\n## clear the bitmap to mark the sync point in time\nsudo drbdadm disconnect --stacked gitlab_data-stacked\nsudo drbdadm new-current-uuid --clear-bitmap --stacked gitlab_data-stacked/0\n## create a metadata dump\necho Yes | sudo gitlab-drbd slave\nsudo drbdadm primary gitlab_data\nsudo drbdadm apply-al --stacked gitlab_data-stacked\nsudo drbdadm dump-md --stacked gitlab_data-stacked > stacked-md-$(date +%s).txt\n## Create a block device snapshot\nsudo lvcreate -n truck -s --extents 50%FREE gitlab_vg/drbd\n## Turn gitlab back on\necho Yes | sudo gitlab-drbd slave\necho Yes | sudo gitlab-drbd master\n## Make sure the current node will 'win' as primary later on\nsudo drbdadm new-current-uuid --stacked gitlab_data-stacked/0\n```\n\nThis time I needed to take gitlab.com offline for a few minutes to be able to\ndo the metadata export. After that, a second waiting period of 4 days of\nstreaming the disk snapshot with `dd` and `ssh` commenced. And then came the\nbig moment of bringing DRBD back up on theo. It worked! Now I just had to wait\nfor the changes on disk of the last four days to be replicated (which took\nabout a day) and we were ready to flip the big switch, update the DNS and start\nserving gitlab.com from AWS. That final transition took another 10 minutes of\ndowntime, and then we were done.\n\n## Looking back\n\nAs soon as we flipped the switch and started operating out of AWS/Frankfurt,\ngitlab.com became noticeably more responsive. This is in spite of the fact that\nwe are _still_ running on a single server (an [AWS\nc3.8xlarge](http://aws.amazon.com/ec2/instance-types/#c3) instance at the\nmoment).\n\nCounting from the moment I was tasked to work on this data migration, we were\nable to move a 9TB filesystem to a different data center and hosting provider\nin three weeks, requiring 20 minutes of total downtime (spread over three\nmaintenance windows). We did take an operational risk: the new configuration,\nwhile tested to some degree, was understood by only one member of the operations\nteam (myself), so an incident could have meant prolonged downtime due to\noperator confusion. We were lucky that there was no incident during those three\nweeks that made this lack of shared knowledge a problem.\n\nNow if you will excuse me, I have to go and explain to my colleagues how our\nnew gitlab.com infrastructure on AWS is set up. 
:)\n",{"slug":9479,"featured":6,"template":678},"moving-all-your-data","content:en-us:blog:moving-all-your-data.yml","Moving All Your Data","en-us/blog/moving-all-your-data.yml","en-us/blog/moving-all-your-data",{"_path":9485,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9486,"content":9492,"config":9496,"_id":9498,"_type":16,"title":9499,"_source":17,"_file":9500,"_stem":9501,"_extension":20},"/en-us/blog/how-to-install-gitlab",{"title":9487,"description":9488,"ogTitle":9487,"ogDescription":9488,"noIndex":6,"ogImage":9489,"ogUrl":9490,"ogSiteName":692,"ogType":693,"canonicalUrls":9490,"schema":9491},"How to install GitLab on your own domain","Want to get your first own GitLab instance running? Use these easy to follow instructions and you will be active in no time!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684373/Blog/Hero%20Images/installing.jpg","https://about.gitlab.com/blog/how-to-install-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to install GitLab on your own domain\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Job van der Voort\"}],\n        \"datePublished\": \"2015-02-24\",\n      }",{"title":9487,"description":9488,"authors":9493,"heroImage":9489,"date":9494,"body":9495,"category":14},[9372],"2015-02-24","\n\nWant to get your first own GitLab instance running?\nWe're here to help!\n\nThis is what you need to do:\n\n1. Get a VM\n2. Point your domain to GitLab\n3. Install GitLab\n4. Use GitLab\n\n\u003C!-- more -->\n\n\u003Cbr />\n\n## 1. Get a VM\n\nIt's a good time to want to have a VM.\nYou can get one cheap and easily from:\n\n- DigitalOcean\n- Amazon AWS\n- Google Compute\n- Microsoft Azure\n- Dreamhost\n- Linode\n\nIf it's just for you, almost any size will do.\nWe [recommend](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/doc/install/requirements.md#cpu)\nto get a VM with 2 cores and 2GB of memory.\nIt can handle you and all your friends.\n\nDigitalOcean is particularly nice to use and even has a one-click install\nfor GitLab, which bypasses this tutorial completely!\n\nIf you have the option to set the domain name in the process,\nset it to the domain where you want your GitLab domain to be.\n\n![set hostname](https://about.gitlab.com/images/how_to/hostname.png)\n\nYou can create a VM with any of the supported OS's,\nbut if you have no preference, use Ubuntu 14.04 x64.\n\n![choose os](https://about.gitlab.com/images/how_to/choose_os.png)\n\nCreate the VM and take note of the assigned IP for your domain.\n\n![take not of ip](https://about.gitlab.com/images/how_to/ip.png)\n\n## 2. Point your domain to GitLab\n\nAt the place you bought your domain name,\nyou want to point your GitLab domain to the IP address you noted.\n\nCreate an A-Type record set with the GitLab domain as name\nand the IP address as value.\n\nThat may sound arcane, but most domain-resellers make it quite easy.\n\nIn my case, I'm pointing `104.236.58.42` (IP) to `gitlab.jobvandervoort.com`.\n\n## 3. Install GitLab\n\nGo into your new VM by pointing your terminal to your new domain:\n\n```\nssh root@gitlab.jobvandervoort.com\n```\n\nIf you're using DigitalOcean, you can also use their web console.\n\nNext, install Postfix:\n\n```\nsudo apt-get install postfix\n```\n\nSelect `Internet site`. Just keep hitting ENTER. The defaults will work.\n\nDownload and unpack GitLab! 
Download and unpack GitLab! We're getting GitLab 7.8.0 Community Edition.\nIt comes together with GitLab CI 7.8.0 and is full of awesomeness.\n\n```\nwget https://downloads-packages.s3.amazonaws.com/ubuntu-14.04/gitlab_7.8.0-omnibus-1_amd64.deb\nsudo dpkg -i gitlab_7.8.0-omnibus-1_amd64.deb\n```\n\nConfigure and start GitLab:\n\n```\nsudo gitlab-ctl reconfigure\n```\n\n## 4. Use GitLab\n\nGo to your domain and sign in! Use these credentials for the first time:\n\n```\nUsername: root\nPassword: 5iveL!fe\n```\n\nHave fun!\n",{"slug":9497,"featured":6,"template":678},"how-to-install-gitlab","content:en-us:blog:how-to-install-gitlab.yml","How To Install Gitlab","en-us/blog/how-to-install-gitlab.yml","en-us/blog/how-to-install-gitlab",{"_path":9503,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9504,"content":9510,"config":9514,"_id":9516,"_type":16,"title":9517,"_source":17,"_file":9518,"_stem":9519,"_extension":20},"/en-us/blog/gitlab-reduced-merge-conflicts-by-90-percent-with-changelog-placeholders",{"title":9505,"description":9506,"ogTitle":9505,"ogDescription":9506,"noIndex":6,"ogImage":9507,"ogUrl":9508,"ogSiteName":692,"ogType":693,"canonicalUrls":9508,"schema":9509},"GitLab reduced merge conflicts by 90% with changelog placeholders","By utilising changelog placeholders, GitLab reduced merge conflicts by 90 percent! Read more here.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683633/Blog/Hero%20Images/changelog.jpg","https://about.gitlab.com/blog/gitlab-reduced-merge-conflicts-by-90-percent-with-changelog-placeholders","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab reduced merge conflicts by 90% with changelog placeholders\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2015-02-10\"\n      }",{"title":9505,"description":9506,"authors":9511,"heroImage":9507,"date":9512,"body":9513,"category":14},[2269],"2015-02-10","\nGitLab has a very active development cycle with many features being added to its monthly release by\n[more than 700 contributors](http://contributors.gitlab.com/).\nLike many projects it has a\n[changelog file](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/CHANGELOG)\nthat details all significant new features, bugfixes and changes to behaviour.\nEvery pull/merge request author is [encouraged](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/CONTRIBUTING.md#merge-request-guidelines)\nto add a line to this changelog. 
Unfortunately, the number of merge requests led to a time-consuming problem.\n\n\u003C!-- more -->\n\n## Problem\n\n[![:(](https://about.gitlab.com/images/conflict.png)](/images/conflict.png)\n\nThe order in which merge requests are accepted is not known in advance.\nMost merge requests added a new line to the changelog at the end of the section for the upcoming version.\nThis meant that upon merging a single merge request, all other unmerged MRs were immediately broken, since they modified the same line.\nThe authors of the merge requests had to solve the merge conflict with a commit or rebase before their code could be merged in the web UI.\n\n## Update\n\nayufan suggested a much better solution in the comments of this article.\nIf you add `CHANGELOG merge=union` to the .gitattributes file in the root of the repo you should not have any conflicts.\nInstead of leaving conflicts behind, the [union merge option](http://git-scm.com/docs/git-merge-file) resolves them by keeping the lines from both sides.\nAn example of such a setting change is in the [endgamesingularity repo](https://code.google.com/p/endgame-singularity/source/browse/.gitattributes).\nThanks ayufan, we'll try this instead.\n\n## Solution\n\nAt GitLab we solved the above problem by adding 100 placeholder lines, each containing just a hyphen, at the top of the changelog.\nPeople can insert their entry at a random location among the placeholders.\nThere is still a chance of conflict when two merge requests change the same line, but it is greatly reduced.\nIt looks a bit strange to have these empty lines on top, so we added a [comment to explain their purpose](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/CHANGELOG#L1).\n\n[![What developer happiness looks like](https://about.gitlab.com/images/accept.png)](/images/accept.png)\n\nAs [part of the monthly release process](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/doc/release/monthly.md#prepare-changelog-for-next-release) all the empty lines are removed and new ones for the upcoming release are added.\nThe placeholders ensure that lead GitLab developer Dmitriy can merge new contributions easily from his iPad, which has led to more late-night merging activity, making everyone happy.\n\n## What do you do?\n\nWe hope this article inspires other open and closed source projects to reduce their merge conflicts.\nOr, if you don't have a changelog yet, we hope it inspires you to start maintaining one; it sure beats reading git diffs all the time.\nPlease let us know in the comments if you are inspired or if you have any other tricks to share.\n",{"slug":9515,"featured":6,"template":678},"gitlab-reduced-merge-conflicts-by-90-percent-with-changelog-placeholders","content:en-us:blog:gitlab-reduced-merge-conflicts-by-90-percent-with-changelog-placeholders.yml","Gitlab Reduced Merge Conflicts By 90 Percent With Changelog Placeholders","en-us/blog/gitlab-reduced-merge-conflicts-by-90-percent-with-changelog-placeholders.yml","en-us/blog/gitlab-reduced-merge-conflicts-by-90-percent-with-changelog-placeholders",{"_path":9521,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9522,"content":9528,"config":9533,"_id":9535,"_type":16,"title":9536,"_source":17,"_file":9537,"_stem":9538,"_extension":20},"/en-us/blog/7-reasons-why-you-should-be-using-ci",{"title":9523,"description":9524,"ogTitle":9523,"ogDescription":9524,"noIndex":6,"ogImage":9525,"ogUrl":9526,"ogSiteName":692,"ogType":693,"canonicalUrls":9526,"schema":9527},"7 reasons why you should be using Continuous Integration","View the 7 reasons why you and your business should be using Continuous 
Integration for exceptional results!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749684339/Blog/Hero%20Images/vespa.jpg","https://about.gitlab.com/blog/7-reasons-why-you-should-be-using-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"7 reasons why you should be using Continuous Integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patricio Cano\"}],\n        \"datePublished\": \"2015-02-03\"\n      }",{"title":9523,"description":9524,"authors":9529,"heroImage":9525,"date":9531,"body":9532,"category":14},[9530],"Patricio Cano","2015-02-03","\n\nWhen developing software you are usually spoiled for choice. There are many languages you can choose from, different\ntest suites to try, countless frameworks you can use and many Continuous Integration (CI) offerings.\n\nYou can always go with the language you like the most, the test suite you find most practical and choose not to use a\nframework, but you should always think about CI.\n\nContinuous Integration is a way to increase code quality without putting an extra burden on the developers.\nTests and checks of your code are handled on a server and automatically reported back to you.\n\nHere are our top 7 reasons why we think you should be using CI and why you should consider it from the beginning of your\nproject.\n\n\u003C!-- more -->\n\n## 1. Run your tests in the real world\n\nHave you ever had your tests pass on your machine, but fail on someone else's? Well, with CI you can avoid that embarrassment.\nJust push your code to your new branch and the CI server will take care of running the tests for you. If everything is\ngreen, you can be sure that you didn't break anything. And if the tests fail for someone else, you have evidence that the\nproblem is on their end.\n\n## 2. Increase your code coverage\n\nThink your tests cover most of your code? Well, think again. A CI server can check your code\nfor test coverage. Now, every time you commit something new without any tests,\nyou will feel the shame that comes with having your coverage percentage go down because of your changes.\n\n## 3. Deploy your code to production\n\nYou can have the CI server automatically deploy your code to production if all the tests within a given branch are green.\nThis is what is formally known as **Continuous Deployment**, or **Oh my God, that was scary, I'm glad my code worked!**\nin some circles.\n\n## 4. Build stuff now\n\nAll your tests are green and the coverage is good, but you don't handle code that needs to be deployed? No worries! CI\nservers can also trigger build and compilation processes that will take care of your needs in no time. No more having to\nsit in front of your terminal waiting for the build to finish, only to have it fail at the last second. The CI server will\nrun this for you within its scripts and notify you as soon as something goes wrong.\n\n## 5. Build stuff faster\n\nWith parallel build support, you can split your tests and build processes across different machines, so everything will\nfinish even faster than if you ran it locally. It will also consume less local power and resources, so you can continue\nworking on something else while the builds run.\n\n## 6. Don't break stuff\n\nHaving your code tested before and after it is merged will allow you to decrease the number of times your master build\nis broken. Don't wait until it's too late: test yo-self, before you break yo-self.\n\n
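Reasons 3 and 6 boil down to the same guard condition: nothing ships unless the tests are green. A minimal sketch of that idea in shell (the test command and the `deploy.sh` script are hypothetical; a CI server automates this exact check for you on every push):\n\n```\n#!/bin/sh\n# Run the test suite; deploy only if it exits successfully.\nbundle exec rake test && ./deploy.sh production\n```\n\n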
## 7. Decrease code review time\n\nYou can have your CI and version control server communicate with each other and tell you when a merge request is\ngood to merge. It can also show how the code coverage would be affected by it. This can dramatically reduce the time\nit takes to review a merge request.\n\n## GitLab CI\n\nOf course, if you are in the market for a CI server, we kindly encourage you to use [GitLab CI](/solutions/continuous-integration/),\nespecially because it includes all the features we mentioned here and more!\n\nWant to give it a try right now? Grab a [runner package](https://gitlab.com/gitlab-org/omnibus-gitlab-runner/blob/master/doc/install/README.md),\ninstall it on your server, and you can have free CI for your private repositories on GitLab.com and [https://ci.gitlab.com/](https://ci.gitlab.com/).\n",{"slug":9534,"featured":6,"template":678},"7-reasons-why-you-should-be-using-ci","content:en-us:blog:7-reasons-why-you-should-be-using-ci.yml","7 Reasons Why You Should Be Using Ci","en-us/blog/7-reasons-why-you-should-be-using-ci.yml","en-us/blog/7-reasons-why-you-should-be-using-ci",{"_path":9540,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9541,"content":9546,"config":9550,"_id":9552,"_type":16,"title":9553,"_source":17,"_file":9554,"_stem":9555,"_extension":20},"/en-us/blog/ship-log-data-off-site-using-udp",{"title":9542,"description":9543,"ogTitle":9542,"ogDescription":9543,"noIndex":6,"ogImage":2478,"ogUrl":9544,"ogSiteName":692,"ogType":693,"canonicalUrls":9544,"schema":9545},"How you can send your logs ballistically using UDP","With GitLab Enterprise Edition 7.1 and up Omnibus packages, we introduced UDP log shipping.","https://about.gitlab.com/blog/ship-log-data-off-site-using-udp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How you can send your logs ballistically using UDP\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Job van der Voort\"}],\n        \"datePublished\": \"2014-12-08\"\n      }",{"title":9542,"description":9543,"authors":9547,"heroImage":2478,"date":9548,"body":9549,"category":14},[9372],"2014-12-08","\n\nThe last thing you want is your logs causing problems on your server.\nYou need them to debug problems, not cause them. With large GitLab instances, it's\na great idea to ship your logs to a separate server. This way, they are easier to manage\nand you don't need to worry about space. But by using TCP to send the logs you risk\na connection failure and subsequent problems with your instance.\n\nTherefore, with GitLab Enterprise Edition (7.1 and up) Omnibus packages,\nwe introduced UDP log shipping.\nAs opposed to TCP, UDP doesn't care about whether packets get received;\nit keeps sending them in a non-blocking, fire-and-forget manner.\nThat makes UDP really fast and lightweight, and if your log server crashes, it won't\naffect your GitLab instance. UDP doesn't care.\n\n\u003C!-- more -->\n\n## Setting up UDP log shipping\n\nUDP log shipping is very easy to set up. You simply add the following lines to `/etc/gitlab/gitlab.rb`:\n\n```\nlogging['udp_log_shipping_host'] = '1.2.3.4' # Your syslog server\nlogging['udp_log_shipping_port'] = 1514 # Optional, defaults to 514 (syslog)\n```\n\nAnd run `sudo gitlab-ctl reconfigure`. Now your logs will be shipped speedily to `1.2.3.4:1514`!\n\n
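If you want to see the datagrams arriving before you point a real syslog daemon at that port, here is a quick smoke test you could run on the log host (flags as in OpenBSD netcat; other netcat variants differ):\n\n```\n# Listen for UDP datagrams on port 1514 and print whatever arrives.\n# -u = UDP, -l = listen, -k = keep listening after the first sender goes away\nnc -u -l -k 1514\n```\n\n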
An example of what your syslog server will receive:\n\n```\n\u003C13>Jun 26 06:33:46 ubuntu1204-test production.log: Started GET \"/root/my-project/import\" for 127.0.0.1 at 2014-06-26 06:33:46 -0700\n\u003C13>Jun 26 06:33:46 ubuntu1204-test production.log: Processing by ProjectsController#import as HTML\n\u003C13>Jun 26 06:33:46 ubuntu1204-test production.log: Parameters: {\"id\"=>\"root/my-project\"}\n\u003C13>Jun 26 06:33:46 ubuntu1204-test production.log: Completed 200 OK in 122ms (Views: 71.9ms | ActiveRecord: 12.2ms)\n```\n\n## How it works\n\nThe services of GitLab write log messages to logfiles (such as `production.log`)\nor to STDOUT. GitLab Omnibus packages use [svlogd](http://smarden.org/runit/svlogd.8.html)\nto write STDOUT to, for instance, `sidekiq/current`.\n\nsvlogd is great, as it allows us to ship these logs using UDP and even rotate them.\nHowever, it can't work with `.log` files. So in addition to svlogd, we make\nuse of [remote_syslog](https://github.com/papertrail/remote_syslog), which can work\nwith `.log` files and allows us to ship them using UDP.\n\nBy using a single configuration option in the Omnibus package, as shown above,\nwe were able to make UDP log shipping simple to set up and flexible enough to work\nwith both `.log` files and STDOUT logging.\n\n\n## About GitLab\n\nWant to start using UDP log shipping? Check out [GitLab Enterprise Edition](/pricing/feature-comparison/).\nA subscription also includes support, deep LDAP integration, git hooks, Jenkins integration and many more powerful enterprise features.\n\nYou can try GitLab by [downloading](/install/) the Community Edition and installing it on your own server or by signing up to our free, unlimited GitLab instance [GitLab.com](https://gitlab.com/users/sign_up).\n\nSee our previous feature highlights:\n\n- [Groups](/blog/feature-highlight-groups/)\n- [Git Hooks](/blog/feature-highlight-git-hooks/)\n- [Branded Login](/blog/feature-highlight-branded-login-gitlab-ee/)\n- [LDAP Integration](/blog/feature-highlight-ldap-sync/)\n",{"slug":9551,"featured":6,"template":678},"ship-log-data-off-site-using-udp","content:en-us:blog:ship-log-data-off-site-using-udp.yml","Ship Log Data Off Site Using Udp","en-us/blog/ship-log-data-off-site-using-udp.yml","en-us/blog/ship-log-data-off-site-using-udp",{"_path":9557,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9558,"content":9563,"config":9567,"_id":9569,"_type":16,"title":9570,"_source":17,"_file":9571,"_stem":9572,"_extension":20},"/en-us/blog/gitlab-omnibus-packages-now-include-gitlab-ci",{"title":9559,"description":9560,"ogTitle":9559,"ogDescription":9560,"noIndex":6,"ogImage":2478,"ogUrl":9561,"ogSiteName":692,"ogType":693,"canonicalUrls":9561,"schema":9562},"GitLab Omnibus packages now include GitLab CI","Today we are excited to announce that our Omnibus packages now include the GitLab CI Coordinator.","https://about.gitlab.com/blog/gitlab-omnibus-packages-now-include-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Omnibus packages now include GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Vosmaer\"}],\n        \"datePublished\": \"2014-11-06\"\n      }",{"title":9559,"description":9560,"authors":9564,"heroImage":2478,"date":9565,"body":9566,"category":14},[3103],"2014-11-06","\n\nBack in February of this year, we radically simplified the installation process\nof GitLab 
with the [first release of our Omnibus\npackages for GitLab](/blog/gitlab-is-now-simple-to-install/). Today we are excited\nto announce that our Omnibus packages now include the [GitLab CI](/solutions/continuous-integration/)\nCoordinator.\n\nTo start using GitLab CI on your GitLab server, you need to take the following steps:\n\n- [download and install](/install/) the latest Omnibus package for your platform;\n- create a DNS record for GitLab CI pointing to your GitLab server, e.g. `ci.example.com`;\n- add the following line to `/etc/gitlab/gitlab.rb`:\n\n```\n# External URL to reach the GitLab CI Coordinator at\nci_external_url 'http://ci.example.com'\n```\n\nThen run `sudo gitlab-ctl reconfigure` and you have a CI Coordinator running on\nyour GitLab server, integrated with GitLab!\n\n\u003C!-- more -->\n\nTo start running your builds, set up one or more [GitLab CI\nRunners](https://gitlab.com/gitlab-org/gitlab-ci-runner/blob/master/README.md).\n\nThe Omnibus-specific documentation for GitLab CI Coordinator can be found [in\nthe Omnibus-GitLab\nrepo](https://gitlab.com/gitlab-org/omnibus-gitlab/tree/master/doc/gitlab-ci).\n\nIf you want to run the GitLab CI Coordinator on a separate server from your\nGitLab server, you can [disable the GitLab\nservices bundled in the Omnibus packages](https://gitlab.com/gitlab-org/omnibus-gitlab/tree/master/doc/gitlab-ci/README.md#running-gitlab-ci-on-its-own-server).\n\n## Under the hood\n\nRunning GitLab CI in the standard configuration (2 Unicorn workers) will\nrequire about 500MB of RAM.\n\nBy bundling the GitLab CI Coordinator into the Omnibus packages we are able to\nreuse the bundled Ruby, Postgres, NGINX and Redis, as well as the `gitlab-ctl`\nutility. Because of all this reuse of available components, GitLab CI adds\nonly about 20MB to the package downloads. If you are not using GitLab\nCI, you will not notice that it is there.\n\n_Update 2014-11-06 18:17 CET:_ Fixed the date attribute on the blog post.\n",{"slug":9568,"featured":6,"template":678},"gitlab-omnibus-packages-now-include-gitlab-ci","content:en-us:blog:gitlab-omnibus-packages-now-include-gitlab-ci.yml","Gitlab Omnibus Packages Now Include Gitlab Ci","en-us/blog/gitlab-omnibus-packages-now-include-gitlab-ci.yml","en-us/blog/gitlab-omnibus-packages-now-include-gitlab-ci",{"_path":9574,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":9575,"content":9580,"config":9584,"_id":9586,"_type":16,"title":9587,"_source":17,"_file":9588,"_stem":9589,"_extension":20},"/en-us/blog/gitlab-without-gitolite",{"title":9576,"description":9577,"ogTitle":9576,"ogDescription":9577,"noIndex":6,"ogImage":7992,"ogUrl":9578,"ogSiteName":692,"ogType":693,"canonicalUrls":9578,"schema":9579},"GitLab without gitolite","Yep, GitLab 5.0 will be without gitolite. Read this article to learn more.","https://about.gitlab.com/blog/gitlab-without-gitolite","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab without gitolite\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2013-02-12\"\n      }",{"title":9576,"description":9577,"authors":9581,"heroImage":7992,"date":9582,"body":9583,"category":14},[890],"2013-02-12","### Yep, GitLab 5.0 will be without gitolite\n\nGitolite was a real help when we started with GitLab. 
\nIt saved us a lot of work and provided a pretty functional solution.\n\nBut over time we understood that keeping GitLab and gitolite in sync is harder than building our own solution.\n\n#### Problems:\n\n* GitLab and gitolite getting out of sync\n* gitolite becoming slower as the number of repositories grows\n* two system users increasing the complexity of the setup\n* code complexity making it easy to make mistakes\n* gitolite not allowing repositories and keys to be created simultaneously\n\n\u003C!-- more -->\n\n#### GitLab Shell\n\nGitLab Shell is my replacement for gitolite.\n\nBasically it's a few Ruby and shell scripts for managing `/home/git/.ssh/authorized_keys` and `/home/git/repositories`.\n\nYou can find the [source code on GitHub](https://github.com/gitlabhq/gitlab-shell.git).\n\n#### 2 users -> 1 user\n\nEarlier we had two users for GitLab: `gitlab` for GitLab and `git` for gitolite.\n\nNow there is only one `git` user for GitLab and GitLab Shell.\n\n#### New GitLab directory structure\n\nThis is the directory structure you will end up with when following the instructions in the Installation Guide.\n\n    |-- home\n    |   |-- git\n    |       |-- .ssh\n    |       |-- gitlab\n    |       |-- gitlab-satellites\n    |       |-- gitlab-shell\n    |       |-- repositories\n\n#### PROFIT\n\n##### 1. Amount of code\n\nThere were about 1,000 lines of gitolite-related code inside GitLab, plus a library for parsing the gitolite config.\n\nNow there are only about 150 lines of pretty simple code related to GitLab Shell.\n\n##### 2. Performance\n\nFor [https://gitlab.com](https://gitlab.com) we made project creation 10 times faster.\n\n##### 3. Stability\n\nGitLab Shell does not store an Access Control List. It asks GitLab for permissions via the API.\n\nNothing to get out of sync between GitLab and GitLab Shell == a much more stable backend.\n\n##### 4. Simplicity of updates\n\nYou can update gitlab-shell just by running `git pull`.\n\nYou don't even need to restart the GitLab service.\n\n- - -\n\n### Feb 22: GitLab 4.2 - last release with gitolite\n### Mar 22: GitLab 5.0 - requires gitlab-shell\n\n- - -\n\nCurrently I'm working on migration docs for GitLab 5.0.0pre. You can find a [draft here](https://github.com/gitlabhq/gitlabhq/wiki/From-4.2-to-5.0).\n",{"slug":9585,"featured":6,"template":678},"gitlab-without-gitolite","content:en-us:blog:gitlab-without-gitolite.yml","Gitlab Without Gitolite","en-us/blog/gitlab-without-gitolite.yml","en-us/blog/gitlab-without-gitolite",52,[685,711,735,757,778,799,819,839,859],1753799761051]